xref: /openbmc/qemu/accel/tcg/cputlb.c (revision 22879b66)
1 /*
2  *  Common CPU TLB handling
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/main-loop.h"
22 #include "hw/core/tcg-cpu-ops.h"
23 #include "exec/exec-all.h"
24 #include "exec/memory.h"
25 #include "exec/cpu_ldst.h"
26 #include "exec/cputlb.h"
27 #include "exec/tb-flush.h"
28 #include "exec/memory-internal.h"
29 #include "exec/ram_addr.h"
30 #include "exec/mmu-access-type.h"
31 #include "exec/tlb-common.h"
32 #include "exec/vaddr.h"
33 #include "tcg/tcg.h"
34 #include "qemu/error-report.h"
35 #include "exec/log.h"
36 #include "exec/helper-proto-common.h"
37 #include "qemu/atomic.h"
38 #include "qemu/atomic128.h"
39 #include "exec/translate-all.h"
40 #include "trace.h"
41 #include "tb-hash.h"
42 #include "internal-common.h"
43 #include "internal-target.h"
44 #ifdef CONFIG_PLUGIN
45 #include "qemu/plugin-memory.h"
46 #endif
47 #include "tcg/tcg-ldst.h"
48 #include "tcg/oversized-guest.h"
49 
50 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
51 /* #define DEBUG_TLB */
52 /* #define DEBUG_TLB_LOG */
53 
54 #ifdef DEBUG_TLB
55 # define DEBUG_TLB_GATE 1
56 # ifdef DEBUG_TLB_LOG
57 #  define DEBUG_TLB_LOG_GATE 1
58 # else
59 #  define DEBUG_TLB_LOG_GATE 0
60 # endif
61 #else
62 # define DEBUG_TLB_GATE 0
63 # define DEBUG_TLB_LOG_GATE 0
64 #endif
65 
66 #define tlb_debug(fmt, ...) do { \
67     if (DEBUG_TLB_LOG_GATE) { \
68         qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
69                       ## __VA_ARGS__); \
70     } else if (DEBUG_TLB_GATE) { \
71         fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
72     } \
73 } while (0)
74 
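/*
 * assert_cpu_is_self() checks that the thread touching a vCPU's TLB is
 * that vCPU's own thread (or that the vCPU has not been created yet).
 * The check is compiled in only when DEBUG_TLB is defined, so it costs
 * nothing in normal builds.
 */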
75 #define assert_cpu_is_self(cpu) do {                              \
76         if (DEBUG_TLB_GATE) {                                     \
77             g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
78         }                                                         \
79     } while (0)
80 
81 /* run_on_cpu_data.target_ptr should always be big enough for a
82  * vaddr even on 32 bit builds
83  */
84 QEMU_BUILD_BUG_ON(sizeof(vaddr) > sizeof(run_on_cpu_data));
85 
86 /* We currently can't handle more than 16 bits in the MMUIDX bitmask.
87  */
88 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
89 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
90 
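/*
 * fast->mask holds (n_entries - 1) << CPU_TLB_ENTRY_BITS, i.e. the byte
 * offset of the last CPUTLBEntry in the table.  The two helpers below
 * invert that encoding to recover the entry count and the table size
 * in bytes.
 */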
91 static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
92 {
93     return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
94 }
95 
96 static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
97 {
98     return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
99 }
100 
101 static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
102                                     MMUAccessType access_type)
103 {
104     /* Do not rearrange the CPUTLBEntry structure members. */
105     QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) !=
106                       MMU_DATA_LOAD * sizeof(uint64_t));
107     QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) !=
108                       MMU_DATA_STORE * sizeof(uint64_t));
109     QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) !=
110                       MMU_INST_FETCH * sizeof(uint64_t));
111 
112 #if TARGET_LONG_BITS == 32
113     /* Use qatomic_read, in case of addr_write; only care about low bits. */
114     const uint32_t *ptr = (uint32_t *)&entry->addr_idx[access_type];
115     ptr += HOST_BIG_ENDIAN;
116     return qatomic_read(ptr);
117 #else
118     const uint64_t *ptr = &entry->addr_idx[access_type];
119 # if TCG_OVERSIZED_GUEST
120     return *ptr;
121 # else
122     /* access_type might be MMU_DATA_STORE (.addr_write), so use qatomic_read */
123     return qatomic_read(ptr);
124 # endif
125 #endif
126 }
127 
128 static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
129 {
130     return tlb_read_idx(entry, MMU_DATA_STORE);
131 }
132 
133 /* Find the TLB index corresponding to the mmu_idx + address pair.  */
134 static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx,
135                                   vaddr addr)
136 {
137     uintptr_t size_mask = cpu->neg.tlb.f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
138 
139     return (addr >> TARGET_PAGE_BITS) & size_mask;
140 }
141 
142 /* Find the TLB entry corresponding to the mmu_idx + address pair.  */
143 static inline CPUTLBEntry *tlb_entry(CPUState *cpu, uintptr_t mmu_idx,
144                                      vaddr addr)
145 {
146     return &cpu->neg.tlb.f[mmu_idx].table[tlb_index(cpu, mmu_idx, addr)];
147 }
148 
149 static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
150                              size_t max_entries)
151 {
152     desc->window_begin_ns = ns;
153     desc->window_max_entries = max_entries;
154 }
155 
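/*
 * Zap the jump-cache buckets that can hold TBs for @page_addr, so that
 * stale TranslationBlock pointers for a flushed page are not looked up
 * again.
 */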
156 static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
157 {
158     CPUJumpCache *jc = cpu->tb_jmp_cache;
159     int i, i0;
160 
161     if (unlikely(!jc)) {
162         return;
163     }
164 
165     i0 = tb_jmp_cache_hash_page(page_addr);
166     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
167         qatomic_set(&jc->array[i0 + i].tb, NULL);
168     }
169 }
170 
171 /**
172  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
173  * @desc: The CPUTLBDesc portion of the TLB
174  * @fast: The CPUTLBDescFast portion of the same TLB
175  *
176  * Called with tlb_lock held.
177  *
178  * We have two main constraints when resizing a TLB: (1) we only resize it
179  * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
180  * the array or unnecessarily flushing it), which means we do not control how
181  * frequently the resizing can occur; (2) we don't have access to the guest's
182  * future scheduling decisions, and therefore have to decide the magnitude of
183  * the resize based on past observations.
184  *
185  * In general, a memory-hungry process can benefit greatly from an appropriately
186  * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
187  * we just have to make the TLB as large as possible; while an oversized TLB
188  * results in minimal TLB miss rates, it also takes longer to be flushed
189  * (flushes can be _very_ frequent), and the reduced locality can also hurt
190  * performance.
191  *
192  * To achieve near-optimal performance for all kinds of workloads, we:
193  *
194  * 1. Aggressively increase the size of the TLB when the use rate of the
195  * TLB being flushed is high, since it is likely that in the near future this
196  * memory-hungry process will execute again, and its memory hungriness will
197  * probably be similar.
198  *
199  * 2. Slowly reduce the size of the TLB as the use rate declines over a
200  * reasonably large time window. The rationale is that if in such a time window
201  * we have not observed a high TLB use rate, it is likely that we won't observe
202  * it in the near future. In that case, once a time window expires we downsize
203  * the TLB to match the maximum use rate observed in the window.
204  *
205  * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
206  * since in that range performance is likely near-optimal. Recall that the TLB
207  * is direct mapped, so we want the use rate to be low (or at least not too
208  * high), since otherwise we are likely to have a significant amount of
209  * conflict misses.
210  */
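/*
 * Worked example with illustrative numbers: if old_size == 1024 and
 * window_max_entries == 800, the use rate is 800 * 100 / 1024 == 78%,
 * which is above 70%, so the TLB doubles (capped at CPU_TLB_DYN_MAX_BITS).
 * If instead the window expires with window_max_entries == 200, the rate
 * is 19%; pow2ceil(200) == 256 would give an expected rate of 78%, so the
 * ceiling is doubled and the TLB shrinks to 512 entries (but never below
 * 1 << CPU_TLB_DYN_MIN_BITS).
 */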
211 static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
212                                   int64_t now)
213 {
214     size_t old_size = tlb_n_entries(fast);
215     size_t rate;
216     size_t new_size = old_size;
217     int64_t window_len_ms = 100;
218     int64_t window_len_ns = window_len_ms * 1000 * 1000;
219     bool window_expired = now > desc->window_begin_ns + window_len_ns;
220 
221     if (desc->n_used_entries > desc->window_max_entries) {
222         desc->window_max_entries = desc->n_used_entries;
223     }
224     rate = desc->window_max_entries * 100 / old_size;
225 
226     if (rate > 70) {
227         new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
228     } else if (rate < 30 && window_expired) {
229         size_t ceil = pow2ceil(desc->window_max_entries);
230         size_t expected_rate = desc->window_max_entries * 100 / ceil;
231 
232         /*
233          * Avoid undersizing when the max number of entries seen is just below
234          * a pow2. For instance, if max_entries == 1025, the expected use rate
235          * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
236          * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
237          * later. Thus, make sure that the expected use rate remains below 70%;
238          * since we double the size, the lowest rate we would then expect to
239          * see is 35%, which is still within the 30-70% range where we
240          * consider the size appropriate.
241          */
242         if (expected_rate > 70) {
243             ceil *= 2;
244         }
245         new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
246     }
247 
248     if (new_size == old_size) {
249         if (window_expired) {
250             tlb_window_reset(desc, now, desc->n_used_entries);
251         }
252         return;
253     }
254 
255     g_free(fast->table);
256     g_free(desc->fulltlb);
257 
258     tlb_window_reset(desc, now, 0);
259     /* desc->n_used_entries is cleared by the caller */
260     fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
261     fast->table = g_try_new(CPUTLBEntry, new_size);
262     desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
263 
264     /*
265      * If the allocations fail, try smaller sizes. We just freed some
266      * memory, so going back to half of new_size has a good chance of working.
267      * Increased memory pressure elsewhere in the system might cause the
268      * allocations to fail though, so we progressively reduce the allocation
269      * size, aborting if we cannot even allocate the smallest TLB we support.
270      */
271     while (fast->table == NULL || desc->fulltlb == NULL) {
272         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
273             error_report("%s: %s", __func__, strerror(errno));
274             abort();
275         }
276         new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
277         fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
278 
279         g_free(fast->table);
280         g_free(desc->fulltlb);
281         fast->table = g_try_new(CPUTLBEntry, new_size);
282         desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
283     }
284 }
285 
286 static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
287 {
288     desc->n_used_entries = 0;
289     desc->large_page_addr = -1;
290     desc->large_page_mask = -1;
291     desc->vindex = 0;
292     memset(fast->table, -1, sizeof_tlb(fast));
293     memset(desc->vtable, -1, sizeof(desc->vtable));
294 }
295 
296 static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx,
297                                         int64_t now)
298 {
299     CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
300     CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
301 
302     tlb_mmu_resize_locked(desc, fast, now);
303     tlb_mmu_flush_locked(desc, fast);
304 }
305 
306 static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
307 {
308     size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
309 
310     tlb_window_reset(desc, now, 0);
311     desc->n_used_entries = 0;
312     fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
313     fast->table = g_new(CPUTLBEntry, n_entries);
314     desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
315     tlb_mmu_flush_locked(desc, fast);
316 }
317 
318 static inline void tlb_n_used_entries_inc(CPUState *cpu, uintptr_t mmu_idx)
319 {
320     cpu->neg.tlb.d[mmu_idx].n_used_entries++;
321 }
322 
323 static inline void tlb_n_used_entries_dec(CPUState *cpu, uintptr_t mmu_idx)
324 {
325     cpu->neg.tlb.d[mmu_idx].n_used_entries--;
326 }
327 
328 void tlb_init(CPUState *cpu)
329 {
330     int64_t now = get_clock_realtime();
331     int i;
332 
333     qemu_spin_init(&cpu->neg.tlb.c.lock);
334 
335     /* All tlbs are initialized flushed. */
336     cpu->neg.tlb.c.dirty = 0;
337 
338     for (i = 0; i < NB_MMU_MODES; i++) {
339         tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now);
340     }
341 }
342 
343 void tlb_destroy(CPUState *cpu)
344 {
345     int i;
346 
347     qemu_spin_destroy(&cpu->neg.tlb.c.lock);
348     for (i = 0; i < NB_MMU_MODES; i++) {
349         CPUTLBDesc *desc = &cpu->neg.tlb.d[i];
350         CPUTLBDescFast *fast = &cpu->neg.tlb.f[i];
351 
352         g_free(fast->table);
353         g_free(desc->fulltlb);
354     }
355 }
356 
357 /* flush_all_helper: run fn across all cpus except the source
358  *
359  * The helper is queued asynchronously on every cpu other than @src.
360  * Callers that need a synchronisation point additionally queue the
361  * helper on @src as "safe" work (see the *_all_cpus_synced variants),
362  * so that all queued work is finished before execution starts again.
363  */
364 static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
365                              run_on_cpu_data d)
366 {
367     CPUState *cpu;
368 
369     CPU_FOREACH(cpu) {
370         if (cpu != src) {
371             async_run_on_cpu(cpu, fn, d);
372         }
373     }
374 }
375 
376 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
377 {
378     uint16_t asked = data.host_int;
379     uint16_t all_dirty, work, to_clean;
380     int64_t now = get_clock_realtime();
381 
382     assert_cpu_is_self(cpu);
383 
384     tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
385 
386     qemu_spin_lock(&cpu->neg.tlb.c.lock);
387 
388     all_dirty = cpu->neg.tlb.c.dirty;
389     to_clean = asked & all_dirty;
390     all_dirty &= ~to_clean;
391     cpu->neg.tlb.c.dirty = all_dirty;
392 
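    /*
     * Iterate over each mmu_idx whose bit is set in to_clean;
     * "work &= work - 1" clears the lowest set bit on every pass.
     */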
393     for (work = to_clean; work != 0; work &= work - 1) {
394         int mmu_idx = ctz32(work);
395         tlb_flush_one_mmuidx_locked(cpu, mmu_idx, now);
396     }
397 
398     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
399 
400     tcg_flush_jmp_cache(cpu);
401 
402     if (to_clean == ALL_MMUIDX_BITS) {
403         qatomic_set(&cpu->neg.tlb.c.full_flush_count,
404                     cpu->neg.tlb.c.full_flush_count + 1);
405     } else {
406         qatomic_set(&cpu->neg.tlb.c.part_flush_count,
407                     cpu->neg.tlb.c.part_flush_count + ctpop16(to_clean));
408         if (to_clean != asked) {
409             qatomic_set(&cpu->neg.tlb.c.elide_flush_count,
410                         cpu->neg.tlb.c.elide_flush_count +
411                         ctpop16(asked & ~to_clean));
412         }
413     }
414 }
415 
416 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
417 {
418     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
419 
420     if (cpu->created && !qemu_cpu_is_self(cpu)) {
421         async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
422                          RUN_ON_CPU_HOST_INT(idxmap));
423     } else {
424         tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
425     }
426 }
427 
428 void tlb_flush(CPUState *cpu)
429 {
430     tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
431 }
432 
433 void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
434 {
435     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
436 
437     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
438 
439     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
440     fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
441 }
442 
443 void tlb_flush_all_cpus(CPUState *src_cpu)
444 {
445     tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
446 }
447 
448 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
449 {
450     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
451 
452     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
453 
454     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
455     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
456 }
457 
458 void tlb_flush_all_cpus_synced(CPUState *src_cpu)
459 {
460     tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
461 }
462 
463 static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
464                                       vaddr page, vaddr mask)
465 {
466     page &= mask;
467     mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
468 
469     return (page == (tlb_entry->addr_read & mask) ||
470             page == (tlb_addr_write(tlb_entry) & mask) ||
471             page == (tlb_entry->addr_code & mask));
472 }
473 
474 static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
475 {
476     return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
477 }
478 
479 /**
480  * tlb_entry_is_empty - return true if the entry is not in use
481  * @te: pointer to CPUTLBEntry
482  */
483 static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
484 {
485     return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
486 }
487 
488 /* Called with tlb_c.lock held */
489 static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
490                                         vaddr page,
491                                         vaddr mask)
492 {
493     if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
494         memset(tlb_entry, -1, sizeof(*tlb_entry));
495         return true;
496     }
497     return false;
498 }
499 
500 static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page)
501 {
502     return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
503 }
504 
505 /* Called with tlb_c.lock held */
506 static void tlb_flush_vtlb_page_mask_locked(CPUState *cpu, int mmu_idx,
507                                             vaddr page,
508                                             vaddr mask)
509 {
510     CPUTLBDesc *d = &cpu->neg.tlb.d[mmu_idx];
511     int k;
512 
513     assert_cpu_is_self(cpu);
514     for (k = 0; k < CPU_VTLB_SIZE; k++) {
515         if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
516             tlb_n_used_entries_dec(cpu, mmu_idx);
517         }
518     }
519 }
520 
521 static inline void tlb_flush_vtlb_page_locked(CPUState *cpu, int mmu_idx,
522                                               vaddr page)
523 {
524     tlb_flush_vtlb_page_mask_locked(cpu, mmu_idx, page, -1);
525 }
526 
527 static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
528 {
529     vaddr lp_addr = cpu->neg.tlb.d[midx].large_page_addr;
530     vaddr lp_mask = cpu->neg.tlb.d[midx].large_page_mask;
531 
532     /* Check if we need to flush due to large pages.  */
533     if ((page & lp_mask) == lp_addr) {
534         tlb_debug("forcing full flush midx %d (%016"
535                   VADDR_PRIx "/%016" VADDR_PRIx ")\n",
536                   midx, lp_addr, lp_mask);
537         tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
538     } else {
539         if (tlb_flush_entry_locked(tlb_entry(cpu, midx, page), page)) {
540             tlb_n_used_entries_dec(cpu, midx);
541         }
542         tlb_flush_vtlb_page_locked(cpu, midx, page);
543     }
544 }
545 
546 /**
547  * tlb_flush_page_by_mmuidx_async_0:
548  * @cpu: cpu on which to flush
549  * @addr: page of virtual address to flush
550  * @idxmap: set of mmu_idx to flush
551  *
552  * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
553  * at @addr from the tlbs indicated by @idxmap from @cpu.
554  */
555 static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
556                                              vaddr addr,
557                                              uint16_t idxmap)
558 {
559     int mmu_idx;
560 
561     assert_cpu_is_self(cpu);
562 
563     tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);
564 
565     qemu_spin_lock(&cpu->neg.tlb.c.lock);
566     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
567         if ((idxmap >> mmu_idx) & 1) {
568             tlb_flush_page_locked(cpu, mmu_idx, addr);
569         }
570     }
571     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
572 
573     /*
574      * Discard jump cache entries for any tb which might potentially
575      * overlap the flushed page, including the preceding page (a TB may span both).
576      */
577     tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
578     tb_jmp_cache_clear_page(cpu, addr);
579 }
580 
581 /**
582  * tlb_flush_page_by_mmuidx_async_1:
583  * @cpu: cpu on which to flush
584  * @data: encoded addr + idxmap
585  *
586  * Helper for tlb_flush_page_by_mmuidx and friends, called through
587  * async_run_on_cpu.  The idxmap parameter is encoded in the page
588  * offset of the target_ptr field.  This limits the set of mmu_idx
589  * that can be passed via this method.
590  */
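/*
 * For example, with 4 KiB target pages TARGET_PAGE_BITS is 12, so any
 * idxmap value below 4096 (mmu_idx 0..11) fits in the page offset of
 * target_ptr; larger maps must use the allocating _async_2 variant.
 */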
591 static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
592                                              run_on_cpu_data data)
593 {
594     vaddr addr_and_idxmap = data.target_ptr;
595     vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
596     uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
597 
598     tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
599 }
600 
601 typedef struct {
602     vaddr addr;
603     uint16_t idxmap;
604 } TLBFlushPageByMMUIdxData;
605 
606 /**
607  * tlb_flush_page_by_mmuidx_async_2:
608  * @cpu: cpu on which to flush
609  * @data: allocated addr + idxmap
610  *
611  * Helper for tlb_flush_page_by_mmuidx and friends, called through
612  * async_run_on_cpu.  The addr+idxmap parameters are stored in a
613  * TLBFlushPageByMMUIdxData structure that has been allocated
614  * specifically for this helper.  Free the structure when done.
615  */
616 static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
617                                              run_on_cpu_data data)
618 {
619     TLBFlushPageByMMUIdxData *d = data.host_ptr;
620 
621     tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
622     g_free(d);
623 }
624 
625 void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
626 {
627     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
628 
629     /* This should already be page aligned */
630     addr &= TARGET_PAGE_MASK;
631 
632     if (qemu_cpu_is_self(cpu)) {
633         tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
634     } else if (idxmap < TARGET_PAGE_SIZE) {
635         /*
636          * Most targets have only a few mmu_idx.  In the case where
637          * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
638          * allocating memory for this operation.
639          */
640         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
641                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
642     } else {
643         TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
644 
645         /* Otherwise allocate a structure, freed by the worker.  */
646         d->addr = addr;
647         d->idxmap = idxmap;
648         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
649                          RUN_ON_CPU_HOST_PTR(d));
650     }
651 }
652 
653 void tlb_flush_page(CPUState *cpu, vaddr addr)
654 {
655     tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
656 }
657 
658 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
659                                        uint16_t idxmap)
660 {
661     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
662 
663     /* This should already be page aligned */
664     addr &= TARGET_PAGE_MASK;
665 
666     /*
667      * Allocate memory to hold addr+idxmap only when needed.
668      * See tlb_flush_page_by_mmuidx for details.
669      */
670     if (idxmap < TARGET_PAGE_SIZE) {
671         flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
672                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
673     } else {
674         CPUState *dst_cpu;
675 
676         /* Allocate a separate data block for each destination cpu.  */
677         CPU_FOREACH(dst_cpu) {
678             if (dst_cpu != src_cpu) {
679                 TLBFlushPageByMMUIdxData *d
680                     = g_new(TLBFlushPageByMMUIdxData, 1);
681 
682                 d->addr = addr;
683                 d->idxmap = idxmap;
684                 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
685                                  RUN_ON_CPU_HOST_PTR(d));
686             }
687         }
688     }
689 
690     tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
691 }
692 
693 void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
694 {
695     tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
696 }
697 
698 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
699                                               vaddr addr,
700                                               uint16_t idxmap)
701 {
702     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
703 
704     /* This should already be page aligned */
705     addr &= TARGET_PAGE_MASK;
706 
707     /*
708      * Allocate memory to hold addr+idxmap only when needed.
709      * See tlb_flush_page_by_mmuidx for details.
710      */
711     if (idxmap < TARGET_PAGE_SIZE) {
712         flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
713                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
714         async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
715                               RUN_ON_CPU_TARGET_PTR(addr | idxmap));
716     } else {
717         CPUState *dst_cpu;
718         TLBFlushPageByMMUIdxData *d;
719 
720         /* Allocate a separate data block for each destination cpu.  */
721         CPU_FOREACH(dst_cpu) {
722             if (dst_cpu != src_cpu) {
723                 d = g_new(TLBFlushPageByMMUIdxData, 1);
724                 d->addr = addr;
725                 d->idxmap = idxmap;
726                 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
727                                  RUN_ON_CPU_HOST_PTR(d));
728             }
729         }
730 
731         d = g_new(TLBFlushPageByMMUIdxData, 1);
732         d->addr = addr;
733         d->idxmap = idxmap;
734         async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
735                               RUN_ON_CPU_HOST_PTR(d));
736     }
737 }
738 
739 void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
740 {
741     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
742 }
743 
744 static void tlb_flush_range_locked(CPUState *cpu, int midx,
745                                    vaddr addr, vaddr len,
746                                    unsigned bits)
747 {
748     CPUTLBDesc *d = &cpu->neg.tlb.d[midx];
749     CPUTLBDescFast *f = &cpu->neg.tlb.f[midx];
750     vaddr mask = MAKE_64BIT_MASK(0, bits);
751 
752     /*
753      * If @bits is smaller than the tlb size, there may be multiple entries
754      * within the TLB; otherwise all addresses that match under @mask hit
755      * the same TLB entry.
756      * TODO: Perhaps allow bits to be a few bits less than the size.
757      * For now, just flush the entire TLB.
758      *
759      * If @len is larger than the tlb size, then it will take longer to
760      * test all of the entries in the TLB than it will to flush it all.
761      */
762     if (mask < f->mask || len > f->mask) {
763         tlb_debug("forcing full flush midx %d ("
764                   "%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n",
765                   midx, addr, mask, len);
766         tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
767         return;
768     }
769 
770     /*
771      * Check if we need to flush due to large pages.
772      * Because large_page_mask contains all 1's from the msb,
773      * we only need to test the end of the range.
774      */
775     if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
776         tlb_debug("forcing full flush midx %d ("
777                   "%016" VADDR_PRIx "/%016" VADDR_PRIx ")\n",
778                   midx, d->large_page_addr, d->large_page_mask);
779         tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
780         return;
781     }
782 
783     for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
784         vaddr page = addr + i;
785         CPUTLBEntry *entry = tlb_entry(cpu, midx, page);
786 
787         if (tlb_flush_entry_mask_locked(entry, page, mask)) {
788             tlb_n_used_entries_dec(cpu, midx);
789         }
790         tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask);
791     }
792 }
793 
794 typedef struct {
795     vaddr addr;
796     vaddr len;
797     uint16_t idxmap;
798     uint16_t bits;
799 } TLBFlushRangeData;
800 
801 static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
802                                               TLBFlushRangeData d)
803 {
804     int mmu_idx;
805 
806     assert_cpu_is_self(cpu);
807 
808     tlb_debug("range: %016" VADDR_PRIx "/%u+%016" VADDR_PRIx " mmu_map:0x%x\n",
809               d.addr, d.bits, d.len, d.idxmap);
810 
811     qemu_spin_lock(&cpu->neg.tlb.c.lock);
812     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
813         if ((d.idxmap >> mmu_idx) & 1) {
814             tlb_flush_range_locked(cpu, mmu_idx, d.addr, d.len, d.bits);
815         }
816     }
817     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
818 
819     /*
820      * If the length is larger than the jump cache size, then it will take
821      * longer to clear each entry individually than it will to clear it all.
822      */
823     if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
824         tcg_flush_jmp_cache(cpu);
825         return;
826     }
827 
828     /*
829      * Discard jump cache entries for any tb which might potentially
830      * overlap the flushed pages, including the preceding page (a TB may span both).
831      */
832     d.addr -= TARGET_PAGE_SIZE;
833     for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
834         tb_jmp_cache_clear_page(cpu, d.addr);
835         d.addr += TARGET_PAGE_SIZE;
836     }
837 }
838 
839 static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
840                                               run_on_cpu_data data)
841 {
842     TLBFlushRangeData *d = data.host_ptr;
843     tlb_flush_range_by_mmuidx_async_0(cpu, *d);
844     g_free(d);
845 }
846 
847 void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
848                                vaddr len, uint16_t idxmap,
849                                unsigned bits)
850 {
851     TLBFlushRangeData d;
852 
853     /*
854      * If all bits are significant, and len is small,
855      * this devolves to tlb_flush_page.
856      */
857     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
858         tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
859         return;
860     }
861     /* If no page bits are significant, this devolves to tlb_flush. */
862     if (bits < TARGET_PAGE_BITS) {
863         tlb_flush_by_mmuidx(cpu, idxmap);
864         return;
865     }
866 
867     /* This should already be page aligned */
868     d.addr = addr & TARGET_PAGE_MASK;
869     d.len = len;
870     d.idxmap = idxmap;
871     d.bits = bits;
872 
873     if (qemu_cpu_is_self(cpu)) {
874         tlb_flush_range_by_mmuidx_async_0(cpu, d);
875     } else {
876         /* Otherwise allocate a structure, freed by the worker.  */
877         TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
878         async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
879                          RUN_ON_CPU_HOST_PTR(p));
880     }
881 }
882 
883 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
884                                    uint16_t idxmap, unsigned bits)
885 {
886     tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
887 }
888 
889 void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
890                                         vaddr addr, vaddr len,
891                                         uint16_t idxmap, unsigned bits)
892 {
893     TLBFlushRangeData d;
894     CPUState *dst_cpu;
895 
896     /*
897      * If all bits are significant, and len is small,
898      * this devolves to tlb_flush_page.
899      */
900     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
901         tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
902         return;
903     }
904     /* If no page bits are significant, this devolves to tlb_flush. */
905     if (bits < TARGET_PAGE_BITS) {
906         tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
907         return;
908     }
909 
910     /* This should already be page aligned */
911     d.addr = addr & TARGET_PAGE_MASK;
912     d.len = len;
913     d.idxmap = idxmap;
914     d.bits = bits;
915 
916     /* Allocate a separate data block for each destination cpu.  */
917     CPU_FOREACH(dst_cpu) {
918         if (dst_cpu != src_cpu) {
919             TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
920             async_run_on_cpu(dst_cpu,
921                              tlb_flush_range_by_mmuidx_async_1,
922                              RUN_ON_CPU_HOST_PTR(p));
923         }
924     }
925 
926     tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
927 }
928 
929 void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
930                                             vaddr addr, uint16_t idxmap,
931                                             unsigned bits)
932 {
933     tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
934                                        idxmap, bits);
935 }
936 
937 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
938                                                vaddr addr,
939                                                vaddr len,
940                                                uint16_t idxmap,
941                                                unsigned bits)
942 {
943     TLBFlushRangeData d, *p;
944     CPUState *dst_cpu;
945 
946     /*
947      * If all bits are significant, and len is small,
948      * this devolves to tlb_flush_page.
949      */
950     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
951         tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
952         return;
953     }
954     /* If no page bits are significant, this devolves to tlb_flush. */
955     if (bits < TARGET_PAGE_BITS) {
956         tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
957         return;
958     }
959 
960     /* This should already be page aligned */
961     d.addr = addr & TARGET_PAGE_MASK;
962     d.len = len;
963     d.idxmap = idxmap;
964     d.bits = bits;
965 
966     /* Allocate a separate data block for each destination cpu.  */
967     CPU_FOREACH(dst_cpu) {
968         if (dst_cpu != src_cpu) {
969             p = g_memdup(&d, sizeof(d));
970             async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
971                              RUN_ON_CPU_HOST_PTR(p));
972         }
973     }
974 
975     p = g_memdup(&d, sizeof(d));
976     async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
977                           RUN_ON_CPU_HOST_PTR(p));
978 }
979 
980 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
981                                                    vaddr addr,
982                                                    uint16_t idxmap,
983                                                    unsigned bits)
984 {
985     tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
986                                               idxmap, bits);
987 }
988 
989 /* update the TLBs so that writes to code in the physical page 'ram_addr'
990    can be detected */
991 void tlb_protect_code(ram_addr_t ram_addr)
992 {
993     cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
994                                              TARGET_PAGE_SIZE,
995                                              DIRTY_MEMORY_CODE);
996 }
997 
998 /* update the TLB so that writes in physical page 'ram_addr' are no longer
999    tested for self-modifying code */
1000 void tlb_unprotect_code(ram_addr_t ram_addr)
1001 {
1002     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
1003 }
1004 
1005 
1006 /*
1007  * Dirty write flag handling
1008  *
1009  * When the TCG code writes to a location it looks up the address in
1010  * the TLB and uses that data to compute the final address. If any of
1011  * the lower bits of the address are set then the slow path is forced.
1012  * There are a number of reasons to do this but for normal RAM the
1013  * most usual is detecting writes to code regions which may invalidate
1014  * generated code.
1015  *
1016  * Other vCPUs might be reading their TLBs during guest execution, so we update
1017  * te->addr_write with qatomic_set. We don't need to worry about this for
1018  * oversized guests as MTTCG is disabled for them.
1019  *
1020  * Called with tlb_c.lock held.
1021  */
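/*
 * For example, once TLB_NOTDIRTY is set in addr_write, the comparator no
 * longer matches a page-aligned access address on the fast path, so the
 * generated code falls back to the store slow path, which ends up in
 * notdirty_write() below.
 */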
1022 static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
1023                                          uintptr_t start, uintptr_t length)
1024 {
1025     uintptr_t addr = tlb_entry->addr_write;
1026 
1027     if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
1028                  TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
1029         addr &= TARGET_PAGE_MASK;
1030         addr += tlb_entry->addend;
1031         if ((addr - start) < length) {
1032 #if TARGET_LONG_BITS == 32
1033             uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
1034             ptr_write += HOST_BIG_ENDIAN;
1035             qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
1036 #elif TCG_OVERSIZED_GUEST
1037             tlb_entry->addr_write |= TLB_NOTDIRTY;
1038 #else
1039             qatomic_set(&tlb_entry->addr_write,
1040                         tlb_entry->addr_write | TLB_NOTDIRTY);
1041 #endif
1042         }
1043     }
1044 }
1045 
1046 /*
1047  * Called with tlb_c.lock held.
1048  * Called only from the vCPU context, i.e. the TLB's owner thread.
1049  */
1050 static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
1051 {
1052     *d = *s;
1053 }
1054 
1055 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
1056  * the target vCPU).
1057  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
1058  * thing actually updated is the target TLB entry ->addr_write flags.
1059  */
1060 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
1061 {
1062     int mmu_idx;
1063 
1064     qemu_spin_lock(&cpu->neg.tlb.c.lock);
1065     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1066         unsigned int i;
1067         unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);
1068 
1069         for (i = 0; i < n; i++) {
1070             tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
1071                                          start1, length);
1072         }
1073 
1074         for (i = 0; i < CPU_VTLB_SIZE; i++) {
1075             tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
1076                                          start1, length);
1077         }
1078     }
1079     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1080 }
1081 
1082 /* Called with tlb_c.lock held */
1083 static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
1084                                          vaddr addr)
1085 {
1086     if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) {
1087         tlb_entry->addr_write = addr;
1088     }
1089 }
1090 
1091 /* update the TLB entries corresponding to virtual page 'addr' so that
1092    writes no longer trap through TLB_NOTDIRTY (the page is now dirty) */
1093 static void tlb_set_dirty(CPUState *cpu, vaddr addr)
1094 {
1095     int mmu_idx;
1096 
1097     assert_cpu_is_self(cpu);
1098 
1099     addr &= TARGET_PAGE_MASK;
1100     qemu_spin_lock(&cpu->neg.tlb.c.lock);
1101     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1102         tlb_set_dirty1_locked(tlb_entry(cpu, mmu_idx, addr), addr);
1103     }
1104 
1105     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1106         int k;
1107         for (k = 0; k < CPU_VTLB_SIZE; k++) {
1108             tlb_set_dirty1_locked(&cpu->neg.tlb.d[mmu_idx].vtable[k], addr);
1109         }
1110     }
1111     qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1112 }
1113 
1114 /* Our TLB does not support large pages, so remember the area covered by
1115    large pages and trigger a full TLB flush if these are invalidated.  */
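/*
 * For example (illustrative addresses): after recording a 2 MiB page at
 * 0x40000000, adding another 2 MiB page at 0x40200000 widens lp_mask one
 * bit at a time until both addresses share the masked prefix, so the
 * tracked region becomes 4 MiB at 0x40000000.
 */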
1116 static void tlb_add_large_page(CPUState *cpu, int mmu_idx,
1117                                vaddr addr, uint64_t size)
1118 {
1119     vaddr lp_addr = cpu->neg.tlb.d[mmu_idx].large_page_addr;
1120     vaddr lp_mask = ~(size - 1);
1121 
1122     if (lp_addr == (vaddr)-1) {
1123         /* No previous large page.  */
1124         lp_addr = addr;
1125     } else {
1126         /* Extend the existing region to include the new page.
1127            This is a compromise between unnecessary flushes and
1128            the cost of maintaining a full variable size TLB.  */
1129         lp_mask &= cpu->neg.tlb.d[mmu_idx].large_page_mask;
1130         while (((lp_addr ^ addr) & lp_mask) != 0) {
1131             lp_mask <<= 1;
1132         }
1133     }
1134     cpu->neg.tlb.d[mmu_idx].large_page_addr = lp_addr & lp_mask;
1135     cpu->neg.tlb.d[mmu_idx].large_page_mask = lp_mask;
1136 }
1137 
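/*
 * Fill in the comparator for one access type: flags that fit in the low
 * bits of the address (TLB_FLAGS_MASK) are folded into the comparator,
 * while the remaining slow-path-only flags are stored in
 * full->slow_flags[] and signalled via TLB_FORCE_SLOW.  A disabled
 * access type gets the never-matching comparator -1.
 */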
1138 static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
1139                                    vaddr address, int flags,
1140                                    MMUAccessType access_type, bool enable)
1141 {
1142     if (enable) {
1143         address |= flags & TLB_FLAGS_MASK;
1144         flags &= TLB_SLOW_FLAGS_MASK;
1145         if (flags) {
1146             address |= TLB_FORCE_SLOW;
1147         }
1148     } else {
1149         address = -1;
1150         flags = 0;
1151     }
1152     ent->addr_idx[access_type] = address;
1153     full->slow_flags[access_type] = flags;
1154 }
1155 
1156 /*
1157  * Add a new TLB entry. At most one entry for a given virtual address
1158  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
1159  * supplied size is only used by tlb_flush_page.
1160  *
1161  * Called from TCG-generated code, which is under an RCU read-side
1162  * critical section.
1163  */
1164 void tlb_set_page_full(CPUState *cpu, int mmu_idx,
1165                        vaddr addr, CPUTLBEntryFull *full)
1166 {
1167     CPUTLB *tlb = &cpu->neg.tlb;
1168     CPUTLBDesc *desc = &tlb->d[mmu_idx];
1169     MemoryRegionSection *section;
1170     unsigned int index, read_flags, write_flags;
1171     uintptr_t addend;
1172     CPUTLBEntry *te, tn;
1173     hwaddr iotlb, xlat, sz, paddr_page;
1174     vaddr addr_page;
1175     int asidx, wp_flags, prot;
1176     bool is_ram, is_romd;
1177 
1178     assert_cpu_is_self(cpu);
1179 
1180     if (full->lg_page_size <= TARGET_PAGE_BITS) {
1181         sz = TARGET_PAGE_SIZE;
1182     } else {
1183         sz = (hwaddr)1 << full->lg_page_size;
1184         tlb_add_large_page(cpu, mmu_idx, addr, sz);
1185     }
1186     addr_page = addr & TARGET_PAGE_MASK;
1187     paddr_page = full->phys_addr & TARGET_PAGE_MASK;
1188 
1189     prot = full->prot;
1190     asidx = cpu_asidx_from_attrs(cpu, full->attrs);
1191     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
1192                                                 &xlat, &sz, full->attrs, &prot);
1193     assert(sz >= TARGET_PAGE_SIZE);
1194 
1195     tlb_debug("vaddr=%016" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
1196               " prot=%x idx=%d\n",
1197               addr, full->phys_addr, prot, mmu_idx);
1198 
1199     read_flags = full->tlb_fill_flags;
1200     if (full->lg_page_size < TARGET_PAGE_BITS) {
1201         /* Repeat the MMU check and TLB fill on every access.  */
1202         read_flags |= TLB_INVALID_MASK;
1203     }
1204 
1205     is_ram = memory_region_is_ram(section->mr);
1206     is_romd = memory_region_is_romd(section->mr);
1207 
1208     if (is_ram || is_romd) {
1209         /* RAM and ROMD both have associated host memory. */
1210         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
1211     } else {
1212         /* I/O does not; force the host address to NULL. */
1213         addend = 0;
1214     }
1215 
1216     write_flags = read_flags;
1217     if (is_ram) {
1218         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
1219         assert(!(iotlb & ~TARGET_PAGE_MASK));
1220         /*
1221          * Computing is_clean is expensive; avoid all that unless
1222          * the page is actually writable.
1223          */
1224         if (prot & PAGE_WRITE) {
1225             if (section->readonly) {
1226                 write_flags |= TLB_DISCARD_WRITE;
1227             } else if (cpu_physical_memory_is_clean(iotlb)) {
1228                 write_flags |= TLB_NOTDIRTY;
1229             }
1230         }
1231     } else {
1232         /* I/O or ROMD */
1233         iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
1234         /*
1235          * Writes to romd devices must go through MMIO to enable write.
1236          * Reads to romd devices go through the ram_ptr found above,
1237          * but of course reads to I/O must go through MMIO.
1238          */
1239         write_flags |= TLB_MMIO;
1240         if (!is_romd) {
1241             read_flags = write_flags;
1242         }
1243     }
1244 
1245     wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
1246                                               TARGET_PAGE_SIZE);
1247 
1248     index = tlb_index(cpu, mmu_idx, addr_page);
1249     te = tlb_entry(cpu, mmu_idx, addr_page);
1250 
1251     /*
1252      * Hold the TLB lock for the rest of the function. We could acquire/release
1253      * the lock several times in the function, but it is faster to amortize the
1254      * acquisition cost by acquiring it just once. Note that this leads to
1255      * a longer critical section, but this is not a concern since the TLB lock
1256      * is unlikely to be contended.
1257      */
1258     qemu_spin_lock(&tlb->c.lock);
1259 
1260     /* Note that the tlb is no longer clean.  */
1261     tlb->c.dirty |= 1 << mmu_idx;
1262 
1263     /* Make sure there's no cached translation for the new page.  */
1264     tlb_flush_vtlb_page_locked(cpu, mmu_idx, addr_page);
1265 
1266     /*
1267      * Only evict the old entry to the victim tlb if it's for a
1268      * different page; otherwise just overwrite the stale data.
1269      */
1270     if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) {
1271         unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
1272         CPUTLBEntry *tv = &desc->vtable[vidx];
1273 
1274         /* Evict the old entry into the victim tlb.  */
1275         copy_tlb_helper_locked(tv, te);
1276         desc->vfulltlb[vidx] = desc->fulltlb[index];
1277         tlb_n_used_entries_dec(cpu, mmu_idx);
1278     }
1279 
1280     /* refill the tlb */
1281     /*
1282      * When memory region is ram, iotlb contains a TARGET_PAGE_BITS
1283      * aligned ram_addr_t of the page base of the target RAM.
1284      * Otherwise, iotlb contains
1285      *  - a physical section number in the lower TARGET_PAGE_BITS
1286      *  - the offset within section->mr of the page base (I/O, ROMD) with the
1287      *    TARGET_PAGE_BITS masked off.
1288      * We subtract addr_page (which is page aligned and thus won't
1289      * disturb the low bits) to give an offset which can be added to the
1290      * (non-page-aligned) vaddr of the eventual memory access to get
1291      * the MemoryRegion offset for the access. Note that the vaddr we
1292      * subtract here is that of the page base, and not the same as the
1293      * vaddr we add back in io_prepare()/get_page_addr_code().
1294      */
1295     desc->fulltlb[index] = *full;
1296     full = &desc->fulltlb[index];
1297     full->xlat_section = iotlb - addr_page;
1298     full->phys_addr = paddr_page;
1299 
1300     /* Now calculate the new entry */
1301     tn.addend = addend - addr_page;
1302 
1303     tlb_set_compare(full, &tn, addr_page, read_flags,
1304                     MMU_INST_FETCH, prot & PAGE_EXEC);
1305 
1306     if (wp_flags & BP_MEM_READ) {
1307         read_flags |= TLB_WATCHPOINT;
1308     }
1309     tlb_set_compare(full, &tn, addr_page, read_flags,
1310                     MMU_DATA_LOAD, prot & PAGE_READ);
1311 
1312     if (prot & PAGE_WRITE_INV) {
1313         write_flags |= TLB_INVALID_MASK;
1314     }
1315     if (wp_flags & BP_MEM_WRITE) {
1316         write_flags |= TLB_WATCHPOINT;
1317     }
1318     tlb_set_compare(full, &tn, addr_page, write_flags,
1319                     MMU_DATA_STORE, prot & PAGE_WRITE);
1320 
1321     copy_tlb_helper_locked(te, &tn);
1322     tlb_n_used_entries_inc(cpu, mmu_idx);
1323     qemu_spin_unlock(&tlb->c.lock);
1324 }
1325 
1326 void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
1327                              hwaddr paddr, MemTxAttrs attrs, int prot,
1328                              int mmu_idx, uint64_t size)
1329 {
1330     CPUTLBEntryFull full = {
1331         .phys_addr = paddr,
1332         .attrs = attrs,
1333         .prot = prot,
1334         .lg_page_size = ctz64(size)
1335     };
1336 
1337     assert(is_power_of_2(size));
1338     tlb_set_page_full(cpu, mmu_idx, addr, &full);
1339 }
1340 
1341 void tlb_set_page(CPUState *cpu, vaddr addr,
1342                   hwaddr paddr, int prot,
1343                   int mmu_idx, uint64_t size)
1344 {
1345     tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
1346                             prot, mmu_idx, size);
1347 }
1348 
1349 /*
1350  * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
1351  * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
1352  * be discarded and looked up again (e.g. via tlb_entry()).
1353  */
1354 static void tlb_fill(CPUState *cpu, vaddr addr, int size,
1355                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1356 {
1357     bool ok;
1358 
1359     /*
1360      * This is not a probe, so the only valid return is success; failure
1361      * should result in exception + longjmp to the cpu loop.
1362      */
1363     ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
1364                                     access_type, mmu_idx, false, retaddr);
1365     assert(ok);
1366 }
1367 
1368 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
1369                                         MMUAccessType access_type,
1370                                         int mmu_idx, uintptr_t retaddr)
1371 {
1372     cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
1373                                           mmu_idx, retaddr);
1374 }
1375 
1376 static MemoryRegionSection *
1377 io_prepare(hwaddr *out_offset, CPUState *cpu, hwaddr xlat,
1378            MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
1379 {
1380     MemoryRegionSection *section;
1381     hwaddr mr_offset;
1382 
1383     section = iotlb_to_section(cpu, xlat, attrs);
1384     mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
1385     cpu->mem_io_pc = retaddr;
1386     if (!cpu->neg.can_do_io) {
1387         cpu_io_recompile(cpu, retaddr);
1388     }
1389 
1390     *out_offset = mr_offset;
1391     return section;
1392 }
1393 
1394 static void io_failed(CPUState *cpu, CPUTLBEntryFull *full, vaddr addr,
1395                       unsigned size, MMUAccessType access_type, int mmu_idx,
1396                       MemTxResult response, uintptr_t retaddr)
1397 {
1398     if (!cpu->ignore_memory_transaction_failures
1399         && cpu->cc->tcg_ops->do_transaction_failed) {
1400         hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
1401 
1402         cpu->cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
1403                                                 access_type, mmu_idx,
1404                                                 full->attrs, response, retaddr);
1405     }
1406 }
1407 
1408 /* Return true if ADDR is present in the victim tlb, and has been copied
1409    back to the main tlb.  */
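/*
 * The victim TLB is a small fully-associative cache (CPU_VTLB_SIZE
 * entries per mmu_idx) holding entries recently evicted from the
 * direct-mapped main table; a hit here swaps the entry back so the
 * fast path can find it again.
 */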
1410 static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index,
1411                            MMUAccessType access_type, vaddr page)
1412 {
1413     size_t vidx;
1414 
1415     assert_cpu_is_self(cpu);
1416     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
1417         CPUTLBEntry *vtlb = &cpu->neg.tlb.d[mmu_idx].vtable[vidx];
1418         uint64_t cmp = tlb_read_idx(vtlb, access_type);
1419 
1420         if (cmp == page) {
1421             /* Found entry in victim tlb, swap tlb and iotlb.  */
1422             CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index];
1423 
1424             qemu_spin_lock(&cpu->neg.tlb.c.lock);
1425             copy_tlb_helper_locked(&tmptlb, tlb);
1426             copy_tlb_helper_locked(tlb, vtlb);
1427             copy_tlb_helper_locked(vtlb, &tmptlb);
1428             qemu_spin_unlock(&cpu->neg.tlb.c.lock);
1429 
1430             CPUTLBEntryFull *f1 = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1431             CPUTLBEntryFull *f2 = &cpu->neg.tlb.d[mmu_idx].vfulltlb[vidx];
1432             CPUTLBEntryFull tmpf;
1433             tmpf = *f1; *f1 = *f2; *f2 = tmpf;
1434             return true;
1435         }
1436     }
1437     return false;
1438 }
1439 
1440 static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
1441                            CPUTLBEntryFull *full, uintptr_t retaddr)
1442 {
1443     ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
1444 
1445     trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1446 
1447     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1448         tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
1449     }
1450 
1451     /*
1452      * Set both VGA and migration bits for simplicity and to remove
1453      * the notdirty callback faster.
1454      */
1455     cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1456 
1457     /* We remove the notdirty callback only if the code has been flushed. */
1458     if (!cpu_physical_memory_is_clean(ram_addr)) {
1459         trace_memory_notdirty_set_dirty(mem_vaddr);
1460         tlb_set_dirty(cpu, mem_vaddr);
1461     }
1462 }
1463 
1464 static int probe_access_internal(CPUState *cpu, vaddr addr,
1465                                  int fault_size, MMUAccessType access_type,
1466                                  int mmu_idx, bool nonfault,
1467                                  void **phost, CPUTLBEntryFull **pfull,
1468                                  uintptr_t retaddr, bool check_mem_cbs)
1469 {
1470     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1471     CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
1472     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
1473     vaddr page_addr = addr & TARGET_PAGE_MASK;
1474     int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
1475     bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(cpu);
1476     CPUTLBEntryFull *full;
1477 
1478     if (!tlb_hit_page(tlb_addr, page_addr)) {
1479         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
1480             if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
1481                                             mmu_idx, nonfault, retaddr)) {
1482                 /* Non-faulting page table read failed.  */
1483                 *phost = NULL;
1484                 *pfull = NULL;
1485                 return TLB_INVALID_MASK;
1486             }
1487 
1488             /* TLB resize via tlb_fill may have moved the entry.  */
1489             index = tlb_index(cpu, mmu_idx, addr);
1490             entry = tlb_entry(cpu, mmu_idx, addr);
1491 
1492             /*
1493              * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
1494              * to force the next access through tlb_fill.  We've just
1495              * called tlb_fill, so we know that this entry *is* valid.
1496              */
1497             flags &= ~TLB_INVALID_MASK;
1498         }
1499         tlb_addr = tlb_read_idx(entry, access_type);
1500     }
1501     flags &= tlb_addr;
1502 
1503     *pfull = full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1504     flags |= full->slow_flags[access_type];
1505 
1506     /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
1507     if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY | TLB_CHECK_ALIGNED))
1508         || (access_type != MMU_INST_FETCH && force_mmio)) {
1509         *phost = NULL;
1510         return TLB_MMIO;
1511     }
1512 
1513     /* Everything else is RAM. */
1514     *phost = (void *)((uintptr_t)addr + entry->addend);
1515     return flags;
1516 }
1517 
1518 int probe_access_full(CPUArchState *env, vaddr addr, int size,
1519                       MMUAccessType access_type, int mmu_idx,
1520                       bool nonfault, void **phost, CPUTLBEntryFull **pfull,
1521                       uintptr_t retaddr)
1522 {
1523     int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1524                                       mmu_idx, nonfault, phost, pfull, retaddr,
1525                                       true);
1526 
1527     /* Handle clean RAM pages.  */
1528     if (unlikely(flags & TLB_NOTDIRTY)) {
1529         int dirtysize = size == 0 ? 1 : size;
1530         notdirty_write(env_cpu(env), addr, dirtysize, *pfull, retaddr);
1531         flags &= ~TLB_NOTDIRTY;
1532     }
1533 
1534     return flags;
1535 }
1536 
1537 int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
1538                           MMUAccessType access_type, int mmu_idx,
1539                           void **phost, CPUTLBEntryFull **pfull)
1540 {
1541     void *discard_phost;
1542     CPUTLBEntryFull *discard_tlb;
1543 
1544     /* privately handle users that don't need full results */
1545     phost = phost ? phost : &discard_phost;
1546     pfull = pfull ? pfull : &discard_tlb;
1547 
1548     int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1549                                       mmu_idx, true, phost, pfull, 0, false);
1550 
1551     /* Handle clean RAM pages.  */
1552     if (unlikely(flags & TLB_NOTDIRTY)) {
1553         int dirtysize = size == 0 ? 1 : size;
1554         notdirty_write(env_cpu(env), addr, dirtysize, *pfull, 0);
1555         flags &= ~TLB_NOTDIRTY;
1556     }
1557 
1558     return flags;
1559 }
1560 
1561 int probe_access_flags(CPUArchState *env, vaddr addr, int size,
1562                        MMUAccessType access_type, int mmu_idx,
1563                        bool nonfault, void **phost, uintptr_t retaddr)
1564 {
1565     CPUTLBEntryFull *full;
1566     int flags;
1567 
1568     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1569 
1570     flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1571                                   mmu_idx, nonfault, phost, &full, retaddr,
1572                                   true);
1573 
1574     /* Handle clean RAM pages. */
1575     if (unlikely(flags & TLB_NOTDIRTY)) {
1576         int dirtysize = size == 0 ? 1 : size;
1577         notdirty_write(env_cpu(env), addr, dirtysize, full, retaddr);
1578         flags &= ~TLB_NOTDIRTY;
1579     }
1580 
1581     return flags;
1582 }
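
/*
 * Illustrative sketch, not part of this file: one way a target helper might
 * use probe_access_flags() for a non-faulting writability test.  The helper
 * name and its exact shape are hypothetical.
 */
static inline bool example_page_is_writable(CPUArchState *env, vaddr addr,
                                            int mmu_idx, uintptr_t ra)
{
    void *host;
    int flags = probe_access_flags(env, addr & TARGET_PAGE_MASK, 0,
                                   MMU_DATA_STORE, mmu_idx,
                                   true /* nonfault */, &host, ra);

    /* TLB_INVALID_MASK is returned when the non-faulting fill failed. */
    return !(flags & TLB_INVALID_MASK);
}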
1583 
1584 void *probe_access(CPUArchState *env, vaddr addr, int size,
1585                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1586 {
1587     CPUTLBEntryFull *full;
1588     void *host;
1589     int flags;
1590 
1591     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1592 
1593     flags = probe_access_internal(env_cpu(env), addr, size, access_type,
1594                                   mmu_idx, false, &host, &full, retaddr,
1595                                   true);
1596 
1597     /* Per the interface, size == 0 merely faults the access. */
1598     if (size == 0) {
1599         return NULL;
1600     }
1601 
1602     if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
1603         /* Handle watchpoints.  */
1604         if (flags & TLB_WATCHPOINT) {
1605             int wp_access = (access_type == MMU_DATA_STORE
1606                              ? BP_MEM_WRITE : BP_MEM_READ);
1607             cpu_check_watchpoint(env_cpu(env), addr, size,
1608                                  full->attrs, wp_access, retaddr);
1609         }
1610 
1611         /* Handle clean RAM pages.  */
1612         if (flags & TLB_NOTDIRTY) {
1613             notdirty_write(env_cpu(env), addr, size, full, retaddr);
1614         }
1615     }
1616 
1617     return host;
1618 }
1619 
1620 void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1621                         MMUAccessType access_type, int mmu_idx)
1622 {
1623     CPUTLBEntryFull *full;
1624     void *host;
1625     int flags;
1626 
1627     flags = probe_access_internal(env_cpu(env), addr, 0, access_type,
1628                                   mmu_idx, true, &host, &full, 0, false);
1629 
1630     /* The caller expects no flags to be set at all. */
1631     return flags ? NULL : host;
1632 }
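
/*
 * Illustrative sketch, not part of this file: tlb_vaddr_to_host() only
 * returns a host pointer for plain, fast-path RAM; any flag at all (MMIO,
 * watchpoint, notdirty, ...) yields NULL.  The helper below is hypothetical.
 */
static inline bool example_direct_ram_access(CPUArchState *env, abi_ptr addr,
                                             int mmu_idx)
{
    return tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx) != NULL;
}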
1633 
1634 /*
1635  * Return a ram_addr_t for the virtual address for execution.
1636  *
1637  * Return -1 if we can't translate and execute from an entire page
1638  * of RAM.  This will force us to execute by loading and translating
1639  * one insn at a time, without caching.
1640  *
1641  * NOTE: This function will trigger an exception if the page is
1642  * not executable.
1643  */
1644 tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
1645                                         void **hostp)
1646 {
1647     CPUTLBEntryFull *full;
1648     void *p;
1649 
1650     (void)probe_access_internal(env_cpu(env), addr, 1, MMU_INST_FETCH,
1651                                 cpu_mmu_index(env_cpu(env), true), false,
1652                                 &p, &full, 0, false);
1653     if (p == NULL) {
1654         return -1;
1655     }
1656 
1657     if (full->lg_page_size < TARGET_PAGE_BITS) {
1658         return -1;
1659     }
1660 
1661     if (hostp) {
1662         *hostp = p;
1663     }
1664     return qemu_ram_addr_from_host_nofail(p);
1665 }
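
/*
 * Illustrative sketch, not part of this file: a return value of -1 above
 * means "no directly addressable RAM backing for the whole page", so the
 * translator must fetch code one instruction at a time.  The helper below
 * is hypothetical.
 */
static inline bool example_code_page_in_ram(CPUArchState *env, vaddr pc)
{
    return get_page_addr_code_hostp(env, pc, NULL) != (tb_page_addr_t)-1;
}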
1666 
1667 /* Load/store with atomicity primitives. */
1668 #include "ldst_atomicity.c.inc"
1669 
1670 #ifdef CONFIG_PLUGIN
1671 /*
1672  * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1673  * This should be a hot path as we will have just looked this address up
1674  * in the softmmu lookup code (or helper). We don't handle re-fills or
1675  * checking the victim table. This is purely informational.
1676  *
1677  * The one corner case is i/o write, which can cause changes to the
1678  * address space.  Those changes, and the corresponding tlb flush,
1679  * should be delayed until the next TB, so even then this ought not fail.
1680  * But check, Just in Case.
1681  */
1682 bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
1683                        bool is_store, struct qemu_plugin_hwaddr *data)
1684 {
1685     CPUTLBEntry *tlbe = tlb_entry(cpu, mmu_idx, addr);
1686     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1687     MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
1688     uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
1689     CPUTLBEntryFull *full;
1690 
1691     if (unlikely(!tlb_hit(tlb_addr, addr))) {
1692         return false;
1693     }
1694 
1695     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1696     data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
1697 
1698     /* We must have an iotlb entry for MMIO */
1699     if (tlb_addr & TLB_MMIO) {
1700         MemoryRegionSection *section =
1701             iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK,
1702                              full->attrs);
1703         data->is_io = true;
1704         data->mr = section->mr;
1705     } else {
1706         data->is_io = false;
1707         data->mr = NULL;
1708     }
1709     return true;
1710 }
1711 #endif
1712 
1713 /*
1714  * Probe for a load/store operation.
1715  * Return the host address and TLB flags via MMULookupPageData.
1716  */
1717 
1718 typedef struct MMULookupPageData {
1719     CPUTLBEntryFull *full;
1720     void *haddr;
1721     vaddr addr;
1722     int flags;
1723     int size;
1724 } MMULookupPageData;
1725 
1726 typedef struct MMULookupLocals {
1727     MMULookupPageData page[2];
1728     MemOp memop;
1729     int mmu_idx;
1730 } MMULookupLocals;
1731 
1732 /**
1733  * mmu_lookup1: translate one page
1734  * @cpu: generic cpu state
1735  * @data: lookup parameters
1736  * @mmu_idx: virtual address context
1737  * @access_type: load/store/code
1738  * @ra: return address into tcg generated code, or 0
1739  *
1740  * Resolve the translation for the one page at @data.addr, filling in
1741  * the rest of @data with the results.  If the translation fails,
1742  * tlb_fill will longjmp out.  Return true if the softmmu tlb for
1743  * @mmu_idx may have resized.
1744  */
1745 static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
1746                         int mmu_idx, MMUAccessType access_type, uintptr_t ra)
1747 {
1748     vaddr addr = data->addr;
1749     uintptr_t index = tlb_index(cpu, mmu_idx, addr);
1750     CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
1751     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
1752     bool maybe_resized = false;
1753     CPUTLBEntryFull *full;
1754     int flags;
1755 
1756     /* If the TLB entry is for a different page, reload and try again.  */
1757     if (!tlb_hit(tlb_addr, addr)) {
1758         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
1759                             addr & TARGET_PAGE_MASK)) {
1760             tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
1761             maybe_resized = true;
1762             index = tlb_index(cpu, mmu_idx, addr);
1763             entry = tlb_entry(cpu, mmu_idx, addr);
1764         }
1765         tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
1766     }
1767 
1768     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1769     flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
1770     flags |= full->slow_flags[access_type];
1771 
1772     data->full = full;
1773     data->flags = flags;
1774     /* Compute haddr speculatively; depending on flags it might be invalid. */
1775     data->haddr = (void *)((uintptr_t)addr + entry->addend);
1776 
1777     return maybe_resized;
1778 }
1779 
1780 /**
1781  * mmu_watch_or_dirty
1782  * @cpu: generic cpu state
1783  * @data: lookup parameters
1784  * @access_type: load/store/code
1785  * @ra: return address into tcg generated code, or 0
1786  *
1787  * Trigger watchpoints for @data.addr:@data.size;
1788  * record writes to protected clean pages.
1789  */
1790 static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
1791                                MMUAccessType access_type, uintptr_t ra)
1792 {
1793     CPUTLBEntryFull *full = data->full;
1794     vaddr addr = data->addr;
1795     int flags = data->flags;
1796     int size = data->size;
1797 
1798     /* On watchpoint hit, this will longjmp out.  */
1799     if (flags & TLB_WATCHPOINT) {
1800         int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ;
1801         cpu_check_watchpoint(cpu, addr, size, full->attrs, wp, ra);
1802         flags &= ~TLB_WATCHPOINT;
1803     }
1804 
1805     /* Note that notdirty is only set for writes. */
1806     if (flags & TLB_NOTDIRTY) {
1807         notdirty_write(cpu, addr, size, full, ra);
1808         flags &= ~TLB_NOTDIRTY;
1809     }
1810     data->flags = flags;
1811 }
1812 
1813 /**
1814  * mmu_lookup: translate page(s)
1815  * @cpu: generic cpu state
1816  * @addr: virtual address
1817  * @oi: combined mmu_idx and MemOp
1818  * @ra: return address into tcg generated code, or 0
1819  * @access_type: load/store/code
1820  * @l: output result
1821  *
1822  * Resolve the translation for the page(s) beginning at @addr, for MemOp.size
1823  * bytes.  Return true if the lookup crosses a page boundary.
1824  */
1825 static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
1826                        uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
1827 {
1828     unsigned a_bits;
1829     bool crosspage;
1830     int flags;
1831 
1832     l->memop = get_memop(oi);
1833     l->mmu_idx = get_mmuidx(oi);
1834 
1835     tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
1836 
1837     /* Handle CPU-specific unaligned behaviour. */
1838     a_bits = get_alignment_bits(l->memop);
1839     if (addr & ((1 << a_bits) - 1)) {
1840         cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
1841     }
1842 
1843     l->page[0].addr = addr;
1844     l->page[0].size = memop_size(l->memop);
1845     l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
1846     l->page[1].size = 0;
1847     crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
1848 
1849     if (likely(!crosspage)) {
1850         mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
1851 
1852         flags = l->page[0].flags;
1853         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1854             mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
1855         }
1856         if (unlikely(flags & TLB_BSWAP)) {
1857             l->memop ^= MO_BSWAP;
1858         }
1859     } else {
1860         /* Finish computing the page-crossing split. */
1861         int size0 = l->page[1].addr - addr;
1862         l->page[1].size = l->page[0].size - size0;
1863         l->page[0].size = size0;
1864 
1865         /*
1866          * Lookup both pages, recognizing exceptions from either.  If the
1867          * second lookup potentially resized, refresh first CPUTLBEntryFull.
1868          */
1869         mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
1870         if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) {
1871             uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
1872             l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
1873         }
1874 
1875         flags = l->page[0].flags | l->page[1].flags;
1876         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1877             mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
1878             mmu_watch_or_dirty(cpu, &l->page[1], type, ra);
1879         }
1880 
1881         /*
1882          * Since target/sparc is the only user of TLB_BSWAP, and all
1883          * Sparc accesses are aligned, any treatment across two pages
1884          * would be arbitrary.  Refuse it until there's a use.
1885          */
1886         tcg_debug_assert((flags & TLB_BSWAP) == 0);
1887     }
1888 
1889     /*
1890      * This alignment check differs from the one above, in that this is
1891      * based on the atomicity of the operation. The intended use case is
1892      * the ARM memory type field of each PTE, where access to pages with
1893      * Device memory type require alignment.
1894      */
1895     if (unlikely(flags & TLB_CHECK_ALIGNED)) {
1896         MemOp size = l->memop & MO_SIZE;
1897 
1898         switch (l->memop & MO_ATOM_MASK) {
1899         case MO_ATOM_NONE:
1900             size = MO_8;
1901             break;
1902         case MO_ATOM_IFALIGN_PAIR:
1903         case MO_ATOM_WITHIN16_PAIR:
1904             size = size ? size - 1 : 0;
1905             break;
1906         default:
1907             break;
1908         }
1909         if (addr & ((1 << size) - 1)) {
1910             cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
1911         }
1912     }
1913 
1914     return crosspage;
1915 }
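
/*
 * Illustrative sketch, not part of this file: the page-crossing test above
 * in compact form.  With 4 KiB pages, an 8-byte access at 0x1ffa has its
 * last byte at 0x2001, so it crosses and is split 6 + 2 across the two
 * pages.  The helper below is hypothetical.
 */
static inline bool example_crosses_page(vaddr addr, int size)
{
    vaddr last = addr + size - 1;

    return ((addr ^ last) & TARGET_PAGE_MASK) != 0;
}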
1916 
1917 /*
1918  * Probe for an atomic operation.  Do not allow unaligned operations,
1919  * or io operations to proceed.  Return the host address.
1920  */
1921 static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
1922                                int size, uintptr_t retaddr)
1923 {
1924     uintptr_t mmu_idx = get_mmuidx(oi);
1925     MemOp mop = get_memop(oi);
1926     int a_bits = get_alignment_bits(mop);
1927     uintptr_t index;
1928     CPUTLBEntry *tlbe;
1929     vaddr tlb_addr;
1930     void *hostaddr;
1931     CPUTLBEntryFull *full;
1932 
1933     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1934 
1935     /* Adjust the given return address.  */
1936     retaddr -= GETPC_ADJ;
1937 
1938     /* Enforce guest required alignment.  */
1939     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1940         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1941         cpu_unaligned_access(cpu, addr, MMU_DATA_STORE,
1942                              mmu_idx, retaddr);
1943     }
1944 
1945     /* Enforce qemu required alignment.  */
1946     if (unlikely(addr & (size - 1))) {
1947         /* We get here if guest alignment was not requested,
1948            or was not enforced by cpu_unaligned_access above.
1949            We might widen the access and emulate, but for now
1950            mark an exception and exit the cpu loop.  */
1951         goto stop_the_world;
1952     }
1953 
1954     index = tlb_index(cpu, mmu_idx, addr);
1955     tlbe = tlb_entry(cpu, mmu_idx, addr);
1956 
1957     /* Check TLB entry and enforce page permissions.  */
1958     tlb_addr = tlb_addr_write(tlbe);
1959     if (!tlb_hit(tlb_addr, addr)) {
1960         if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
1961                             addr & TARGET_PAGE_MASK)) {
1962             tlb_fill(cpu, addr, size,
1963                      MMU_DATA_STORE, mmu_idx, retaddr);
1964             index = tlb_index(cpu, mmu_idx, addr);
1965             tlbe = tlb_entry(cpu, mmu_idx, addr);
1966         }
1967         tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1968     }
1969 
1970     /*
1971      * Let the guest notice RMW on a write-only page.
1972      * We have just verified that the page is writable.
1973      * Subpage lookups may have left TLB_INVALID_MASK set,
1974      * but addr_read will only be -1 if PAGE_READ was unset.
1975      */
1976     if (unlikely(tlbe->addr_read == -1)) {
1977         tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
1978         /*
1979          * Since we don't support reads and writes to different
1980          * addresses, and we do have the proper page loaded for
1981          * write, this shouldn't ever return.  But just in case,
1982          * handle via stop-the-world.
1983          */
1984         goto stop_the_world;
1985     }
1986     /* Collect tlb flags for read. */
1987     tlb_addr |= tlbe->addr_read;
1988 
1989     /* Notice an IO access or a needs-MMU-lookup access */
1990     if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
1991         /* There's really nothing that can be done to
1992            support this apart from stop-the-world.  */
1993         goto stop_the_world;
1994     }
1995 
1996     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1997     full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
1998 
1999     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
2000         notdirty_write(cpu, addr, size, full, retaddr);
2001     }
2002 
2003     if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
2004         int wp_flags = 0;
2005 
2006         if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
2007             wp_flags |= BP_MEM_WRITE;
2008         }
2009         if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
2010             wp_flags |= BP_MEM_READ;
2011         }
2012         if (wp_flags) {
2013             cpu_check_watchpoint(cpu, addr, size,
2014                                  full->attrs, wp_flags, retaddr);
2015         }
2016     }
2017 
2018     return hostaddr;
2019 
2020  stop_the_world:
2021     cpu_loop_exit_atomic(cpu, retaddr);
2022 }
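
/*
 * Illustrative sketch, not part of this file: the two alignment checks above
 * are distinct.  A 4-byte atomic at 0x1002 may satisfy the guest-required
 * alignment (e.g. a_bits == 1) yet still fail the qemu-required natural
 * alignment, in which case it is handled by stop-the-world rather than by a
 * guest fault.  The helper below is hypothetical.
 */
static inline bool example_atomic_needs_stop_the_world(vaddr addr, int size)
{
    return (addr & (size - 1)) != 0;
}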
2023 
2024 /*
2025  * Load Helpers
2026  *
2027  * We support two different access types. SOFTMMU_CODE_ACCESS is
2028  * specifically for reading instructions from system memory. It is
2029  * called by the translation loop and in some helpers where the code
2030  * is disassembled. It shouldn't be called directly by guest code.
2031  *
2032  * For the benefit of TCG generated code, we want to avoid the
2033  * complication of ABI-specific return type promotion and always
2034  * return a value extended to the register size of the host. This is
2035  * tcg_target_long, except in the case of a 32-bit host and 64-bit
2036  * data, and for that we always have uint64_t.
2037  *
2038  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
2039  */
2040 
2041 /**
2042  * do_ld_mmio_beN:
2043  * @cpu: generic cpu state
2044  * @full: page parameters
2045  * @ret_be: accumulated data
2046  * @addr: virtual address
2047  * @size: number of bytes
2048  * @mmu_idx: virtual address context
2049  * @ra: return address into tcg generated code, or 0
2050  * Context: BQL held
2051  *
2052  * Load @size bytes from @addr, which is memory-mapped i/o.
2053  * The bytes are concatenated in big-endian order with @ret_be.
2054  */
2055 static uint64_t int_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
2056                                 uint64_t ret_be, vaddr addr, int size,
2057                                 int mmu_idx, MMUAccessType type, uintptr_t ra,
2058                                 MemoryRegion *mr, hwaddr mr_offset)
2059 {
2060     do {
2061         MemOp this_mop;
2062         unsigned this_size;
2063         uint64_t val;
2064         MemTxResult r;
2065 
2066         /* Read aligned pieces up to 8 bytes. */
2067         this_mop = ctz32(size | (int)addr | 8);
2068         this_size = 1 << this_mop;
2069         this_mop |= MO_BE;
2070 
2071         r = memory_region_dispatch_read(mr, mr_offset, &val,
2072                                         this_mop, full->attrs);
2073         if (unlikely(r != MEMTX_OK)) {
2074             io_failed(cpu, full, addr, this_size, type, mmu_idx, r, ra);
2075         }
2076         if (this_size == 8) {
2077             return val;
2078         }
2079 
2080         ret_be = (ret_be << (this_size * 8)) | val;
2081         addr += this_size;
2082         mr_offset += this_size;
2083         size -= this_size;
2084     } while (size);
2085 
2086     return ret_be;
2087 }
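
/*
 * Illustrative sketch, not part of this file: the chunking rule used above.
 * ctz32(size | addr | 8) selects the largest power of two that divides both
 * the current address and the remaining size, capped at 8 bytes; e.g. an
 * address ending in ...2 with size 6 is read as a 2-byte piece followed by
 * a 4-byte piece.  The helper below is hypothetical.
 */
static inline unsigned example_mmio_piece_size(vaddr addr, int size)
{
    return 1u << ctz32(size | (int)addr | 8);
}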
2088 
2089 static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
2090                                uint64_t ret_be, vaddr addr, int size,
2091                                int mmu_idx, MMUAccessType type, uintptr_t ra)
2092 {
2093     MemoryRegionSection *section;
2094     MemoryRegion *mr;
2095     hwaddr mr_offset;
2096     MemTxAttrs attrs;
2097 
2098     tcg_debug_assert(size > 0 && size <= 8);
2099 
2100     attrs = full->attrs;
2101     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2102     mr = section->mr;
2103 
2104     BQL_LOCK_GUARD();
2105     return int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
2106                            type, ra, mr, mr_offset);
2107 }
2108 
2109 static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
2110                                uint64_t ret_be, vaddr addr, int size,
2111                                int mmu_idx, uintptr_t ra)
2112 {
2113     MemoryRegionSection *section;
2114     MemoryRegion *mr;
2115     hwaddr mr_offset;
2116     MemTxAttrs attrs;
2117     uint64_t a, b;
2118 
2119     tcg_debug_assert(size > 8 && size <= 16);
2120 
2121     attrs = full->attrs;
2122     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2123     mr = section->mr;
2124 
2125     BQL_LOCK_GUARD();
2126     a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
2127                         MMU_DATA_LOAD, ra, mr, mr_offset);
2128     b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
2129                         MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
2130     return int128_make128(b, a);
2131 }
2132 
2133 /**
2134  * do_ld_bytes_beN
2135  * @p: translation parameters
2136  * @ret_be: accumulated data
2137  *
2138  * Load @p->size bytes from @p->haddr, which is RAM.
2139  * The bytes are concatenated in big-endian order with @ret_be.
2140  */
2141 static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be)
2142 {
2143     uint8_t *haddr = p->haddr;
2144     int i, size = p->size;
2145 
2146     for (i = 0; i < size; i++) {
2147         ret_be = (ret_be << 8) | haddr[i];
2148     }
2149     return ret_be;
2150 }
2151 
2152 /**
2153  * do_ld_parts_beN
2154  * @p: translation parameters
2155  * @ret_be: accumulated data
2156  *
2157  * As do_ld_bytes_beN, but atomically on each aligned part.
2158  */
2159 static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be)
2160 {
2161     void *haddr = p->haddr;
2162     int size = p->size;
2163 
2164     do {
2165         uint64_t x;
2166         int n;
2167 
2168         /*
2169          * Find minimum of alignment and size.
2170          * This is slightly stronger than required by MO_ATOM_SUBALIGN, which
2171          * would have only checked the low bits of addr|size once at the start,
2172          * but is just as easy.
2173          */
2174         switch (((uintptr_t)haddr | size) & 7) {
2175         case 4:
2176             x = cpu_to_be32(load_atomic4(haddr));
2177             ret_be = (ret_be << 32) | x;
2178             n = 4;
2179             break;
2180         case 2:
2181         case 6:
2182             x = cpu_to_be16(load_atomic2(haddr));
2183             ret_be = (ret_be << 16) | x;
2184             n = 2;
2185             break;
2186         default:
2187             x = *(uint8_t *)haddr;
2188             ret_be = (ret_be << 8) | x;
2189             n = 1;
2190             break;
2191         case 0:
2192             g_assert_not_reached();
2193         }
2194         haddr += n;
2195         size -= n;
2196     } while (size != 0);
2197     return ret_be;
2198 }
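
/*
 * Illustrative sketch, not part of this file: the piece selection above.
 * For a host address ending in ...6 and size 6, MO_ATOM_SUBALIGN splits the
 * load into a 2-byte piece followed by a 4-byte piece, each performed with
 * a single atomic access.  The helper below is hypothetical.
 */
static inline int example_subalign_piece_size(uintptr_t haddr, int size)
{
    switch ((haddr | size) & 7) {
    case 4:
        return 4;
    case 2:
    case 6:
        return 2;
    default:
        return 1;
    }
}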
2199 
2200 /**
2201  * do_ld_whole_be4
2202  * @p: translation parameters
2203  * @ret_be: accumulated data
2204  *
2205  * As do_ld_bytes_beN, but with one atomic load.
2206  * Four aligned bytes are guaranteed to cover the load.
2207  */
2208 static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be)
2209 {
2210     int o = p->addr & 3;
2211     uint32_t x = load_atomic4(p->haddr - o);
2212 
2213     x = cpu_to_be32(x);
2214     x <<= o * 8;
2215     x >>= (4 - p->size) * 8;
2216     return (ret_be << (p->size * 8)) | x;
2217 }
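
/*
 * Illustrative sketch, not part of this file: the shift arithmetic above.
 * If the aligned word holds the bytes { 0x11, 0x22, 0x33, 0x44 } in memory
 * order, then with o == 1 and p->size == 2 the result is 0x2233.  The
 * helper below is hypothetical and takes the word as produced by
 * cpu_to_be32() above.
 */
static inline uint32_t example_extract_be4(uint32_t word_be, int o, int size)
{
    word_be <<= o * 8;            /* drop the bytes before the access */
    word_be >>= (4 - size) * 8;   /* keep @size bytes, right aligned */
    return word_be;
}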
2218 
2219 /**
2220  * do_ld_whole_be8
2221  * @p: translation parameters
2222  * @ret_be: accumulated data
2223  *
2224  * As do_ld_bytes_beN, but with one atomic load.
2225  * Eight aligned bytes are guaranteed to cover the load.
2226  */
2227 static uint64_t do_ld_whole_be8(CPUState *cpu, uintptr_t ra,
2228                                 MMULookupPageData *p, uint64_t ret_be)
2229 {
2230     int o = p->addr & 7;
2231     uint64_t x = load_atomic8_or_exit(cpu, ra, p->haddr - o);
2232 
2233     x = cpu_to_be64(x);
2234     x <<= o * 8;
2235     x >>= (8 - p->size) * 8;
2236     return (ret_be << (p->size * 8)) | x;
2237 }
2238 
2239 /**
2240  * do_ld_whole_be16
2241  * @p: translation parameters
2242  * @ret_be: accumulated data
2243  *
2244  * As do_ld_bytes_beN, but with one atomic load.
2245  * 16 aligned bytes are guaranteed to cover the load.
2246  */
2247 static Int128 do_ld_whole_be16(CPUState *cpu, uintptr_t ra,
2248                                MMULookupPageData *p, uint64_t ret_be)
2249 {
2250     int o = p->addr & 15;
2251     Int128 x, y = load_atomic16_or_exit(cpu, ra, p->haddr - o);
2252     int size = p->size;
2253 
2254     if (!HOST_BIG_ENDIAN) {
2255         y = bswap128(y);
2256     }
2257     y = int128_lshift(y, o * 8);
2258     y = int128_urshift(y, (16 - size) * 8);
2259     x = int128_make64(ret_be);
2260     x = int128_lshift(x, size * 8);
2261     return int128_or(x, y);
2262 }
2263 
2264 /*
2265  * Wrapper for the above.
2266  */
2267 static uint64_t do_ld_beN(CPUState *cpu, MMULookupPageData *p,
2268                           uint64_t ret_be, int mmu_idx, MMUAccessType type,
2269                           MemOp mop, uintptr_t ra)
2270 {
2271     MemOp atom;
2272     unsigned tmp, half_size;
2273 
2274     if (unlikely(p->flags & TLB_MMIO)) {
2275         return do_ld_mmio_beN(cpu, p->full, ret_be, p->addr, p->size,
2276                               mmu_idx, type, ra);
2277     }
2278 
2279     /*
2280      * It is a given that we cross a page and therefore there is no
2281      * atomicity for the load as a whole, but subobjects may need attention.
2282      */
2283     atom = mop & MO_ATOM_MASK;
2284     switch (atom) {
2285     case MO_ATOM_SUBALIGN:
2286         return do_ld_parts_beN(p, ret_be);
2287 
2288     case MO_ATOM_IFALIGN_PAIR:
2289     case MO_ATOM_WITHIN16_PAIR:
2290         tmp = mop & MO_SIZE;
2291         tmp = tmp ? tmp - 1 : 0;
2292         half_size = 1 << tmp;
2293         if (atom == MO_ATOM_IFALIGN_PAIR
2294             ? p->size == half_size
2295             : p->size >= half_size) {
2296             if (!HAVE_al8_fast && p->size < 4) {
2297                 return do_ld_whole_be4(p, ret_be);
2298             } else {
2299                 return do_ld_whole_be8(cpu, ra, p, ret_be);
2300             }
2301         }
2302         /* fall through */
2303 
2304     case MO_ATOM_IFALIGN:
2305     case MO_ATOM_WITHIN16:
2306     case MO_ATOM_NONE:
2307         return do_ld_bytes_beN(p, ret_be);
2308 
2309     default:
2310         g_assert_not_reached();
2311     }
2312 }
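
/*
 * Illustrative sketch, not part of this file: the half_size computation
 * above.  For an 8-byte MO_ATOM_IFALIGN_PAIR load split 4 + 4 across the
 * page boundary, half_size is 4 and each half is loaded with one atomic
 * whole-word access; a 5 + 3 split falls through to byte-by-byte loads.
 * The helper below is hypothetical.
 */
static inline unsigned example_pair_half_size(MemOp mop)
{
    unsigned sz = mop & MO_SIZE;

    return 1u << (sz ? sz - 1 : 0);
}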
2313 
2314 /*
2315  * Wrapper for the above, for 8 < size < 16.
2316  */
2317 static Int128 do_ld16_beN(CPUState *cpu, MMULookupPageData *p,
2318                           uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra)
2319 {
2320     int size = p->size;
2321     uint64_t b;
2322     MemOp atom;
2323 
2324     if (unlikely(p->flags & TLB_MMIO)) {
2325         return do_ld16_mmio_beN(cpu, p->full, a, p->addr, size, mmu_idx, ra);
2326     }
2327 
2328     /*
2329      * It is a given that we cross a page and therefore there is no
2330      * atomicity for the load as a whole, but subobjects may need attention.
2331      */
2332     atom = mop & MO_ATOM_MASK;
2333     switch (atom) {
2334     case MO_ATOM_SUBALIGN:
2335         p->size = size - 8;
2336         a = do_ld_parts_beN(p, a);
2337         p->haddr += size - 8;
2338         p->size = 8;
2339         b = do_ld_parts_beN(p, 0);
2340         break;
2341 
2342     case MO_ATOM_WITHIN16_PAIR:
2343         /* Since size > 8, this is the half that must be atomic. */
2344         return do_ld_whole_be16(cpu, ra, p, a);
2345 
2346     case MO_ATOM_IFALIGN_PAIR:
2347         /*
2348          * Since size > 8, both halves are misaligned,
2349          * and so neither is atomic.
2350          */
2351     case MO_ATOM_IFALIGN:
2352     case MO_ATOM_WITHIN16:
2353     case MO_ATOM_NONE:
2354         p->size = size - 8;
2355         a = do_ld_bytes_beN(p, a);
2356         b = ldq_be_p(p->haddr + size - 8);
2357         break;
2358 
2359     default:
2360         g_assert_not_reached();
2361     }
2362 
2363     return int128_make128(b, a);
2364 }
2365 
2366 static uint8_t do_ld_1(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2367                        MMUAccessType type, uintptr_t ra)
2368 {
2369     if (unlikely(p->flags & TLB_MMIO)) {
2370         return do_ld_mmio_beN(cpu, p->full, 0, p->addr, 1, mmu_idx, type, ra);
2371     } else {
2372         return *(uint8_t *)p->haddr;
2373     }
2374 }
2375 
2376 static uint16_t do_ld_2(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2377                         MMUAccessType type, MemOp memop, uintptr_t ra)
2378 {
2379     uint16_t ret;
2380 
2381     if (unlikely(p->flags & TLB_MMIO)) {
2382         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 2, mmu_idx, type, ra);
2383         if ((memop & MO_BSWAP) == MO_LE) {
2384             ret = bswap16(ret);
2385         }
2386     } else {
2387         /* Perform the load host endian, then swap if necessary. */
2388         ret = load_atom_2(cpu, ra, p->haddr, memop);
2389         if (memop & MO_BSWAP) {
2390             ret = bswap16(ret);
2391         }
2392     }
2393     return ret;
2394 }
2395 
2396 static uint32_t do_ld_4(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2397                         MMUAccessType type, MemOp memop, uintptr_t ra)
2398 {
2399     uint32_t ret;
2400 
2401     if (unlikely(p->flags & TLB_MMIO)) {
2402         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 4, mmu_idx, type, ra);
2403         if ((memop & MO_BSWAP) == MO_LE) {
2404             ret = bswap32(ret);
2405         }
2406     } else {
2407         /* Perform the load host endian. */
2408         ret = load_atom_4(cpu, ra, p->haddr, memop);
2409         if (memop & MO_BSWAP) {
2410             ret = bswap32(ret);
2411         }
2412     }
2413     return ret;
2414 }
2415 
2416 static uint64_t do_ld_8(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
2417                         MMUAccessType type, MemOp memop, uintptr_t ra)
2418 {
2419     uint64_t ret;
2420 
2421     if (unlikely(p->flags & TLB_MMIO)) {
2422         ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 8, mmu_idx, type, ra);
2423         if ((memop & MO_BSWAP) == MO_LE) {
2424             ret = bswap64(ret);
2425         }
2426     } else {
2427         /* Perform the load host endian. */
2428         ret = load_atom_8(cpu, ra, p->haddr, memop);
2429         if (memop & MO_BSWAP) {
2430             ret = bswap64(ret);
2431         }
2432     }
2433     return ret;
2434 }
2435 
2436 static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2437                           uintptr_t ra, MMUAccessType access_type)
2438 {
2439     MMULookupLocals l;
2440     bool crosspage;
2441 
2442     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2443     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2444     tcg_debug_assert(!crosspage);
2445 
2446     return do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
2447 }
2448 
2449 static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2450                            uintptr_t ra, MMUAccessType access_type)
2451 {
2452     MMULookupLocals l;
2453     bool crosspage;
2454     uint16_t ret;
2455     uint8_t a, b;
2456 
2457     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2458     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2459     if (likely(!crosspage)) {
2460         return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2461     }
2462 
2463     a = do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
2464     b = do_ld_1(cpu, &l.page[1], l.mmu_idx, access_type, ra);
2465 
2466     if ((l.memop & MO_BSWAP) == MO_LE) {
2467         ret = a | (b << 8);
2468     } else {
2469         ret = b | (a << 8);
2470     }
2471     return ret;
2472 }
2473 
2474 static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2475                            uintptr_t ra, MMUAccessType access_type)
2476 {
2477     MMULookupLocals l;
2478     bool crosspage;
2479     uint32_t ret;
2480 
2481     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2482     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2483     if (likely(!crosspage)) {
2484         return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2485     }
2486 
2487     ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2488     ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
2489     if ((l.memop & MO_BSWAP) == MO_LE) {
2490         ret = bswap32(ret);
2491     }
2492     return ret;
2493 }
2494 
2495 static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
2496                            uintptr_t ra, MMUAccessType access_type)
2497 {
2498     MMULookupLocals l;
2499     bool crosspage;
2500     uint64_t ret;
2501 
2502     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2503     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
2504     if (likely(!crosspage)) {
2505         return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2506     }
2507 
2508     ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2509     ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
2510     if ((l.memop & MO_BSWAP) == MO_LE) {
2511         ret = bswap64(ret);
2512     }
2513     return ret;
2514 }
2515 
2516 static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
2517                           MemOpIdx oi, uintptr_t ra)
2518 {
2519     MMULookupLocals l;
2520     bool crosspage;
2521     uint64_t a, b;
2522     Int128 ret;
2523     int first;
2524 
2525     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2526     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l);
2527     if (likely(!crosspage)) {
2528         if (unlikely(l.page[0].flags & TLB_MMIO)) {
2529             ret = do_ld16_mmio_beN(cpu, l.page[0].full, 0, addr, 16,
2530                                    l.mmu_idx, ra);
2531             if ((l.memop & MO_BSWAP) == MO_LE) {
2532                 ret = bswap128(ret);
2533             }
2534         } else {
2535             /* Perform the load host endian. */
2536             ret = load_atom_16(cpu, ra, l.page[0].haddr, l.memop);
2537             if (l.memop & MO_BSWAP) {
2538                 ret = bswap128(ret);
2539             }
2540         }
2541         return ret;
2542     }
2543 
2544     first = l.page[0].size;
2545     if (first == 8) {
2546         MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64;
2547 
2548         a = do_ld_8(cpu, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
2549         b = do_ld_8(cpu, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
2550         if ((mop8 & MO_BSWAP) == MO_LE) {
2551             ret = int128_make128(a, b);
2552         } else {
2553             ret = int128_make128(b, a);
2554         }
2555         return ret;
2556     }
2557 
2558     if (first < 8) {
2559         a = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx,
2560                       MMU_DATA_LOAD, l.memop, ra);
2561         ret = do_ld16_beN(cpu, &l.page[1], a, l.mmu_idx, l.memop, ra);
2562     } else {
2563         ret = do_ld16_beN(cpu, &l.page[0], 0, l.mmu_idx, l.memop, ra);
2564         b = int128_getlo(ret);
2565         ret = int128_lshift(ret, l.page[1].size * 8);
2566         a = int128_gethi(ret);
2567         b = do_ld_beN(cpu, &l.page[1], b, l.mmu_idx,
2568                       MMU_DATA_LOAD, l.memop, ra);
2569         ret = int128_make128(b, a);
2570     }
2571     if ((l.memop & MO_BSWAP) == MO_LE) {
2572         ret = bswap128(ret);
2573     }
2574     return ret;
2575 }
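
/*
 * Illustrative sketch, not part of this file: the 8 + 8 recombination above.
 * int128_make128() takes (lo, hi), so for a big-endian guest the bytes from
 * the first (lower-addressed) page form the high half and those from the
 * second page form the low half.  The helper below is hypothetical.
 */
static inline Int128 example_combine_be_halves(uint64_t first_page,
                                               uint64_t second_page)
{
    return int128_make128(second_page, first_page);
}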
2576 
2577 /*
2578  * Store Helpers
2579  */
2580 
2581 /**
2582  * do_st_mmio_leN:
2583  * @cpu: generic cpu state
2584  * @full: page parameters
2585  * @val_le: data to store
2586  * @addr: virtual address
2587  * @size: number of bytes
2588  * @mmu_idx: virtual address context
2589  * @ra: return address into tcg generated code, or 0
2590  * Context: BQL held
2591  *
2592  * Store @size bytes at @addr, which is memory-mapped i/o.
2593  * The bytes to store are extracted in little-endian order from @val_le;
2594  * return the bytes of @val_le beyond @p->size that have not been stored.
2595  */
2596 static uint64_t int_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
2597                                 uint64_t val_le, vaddr addr, int size,
2598                                 int mmu_idx, uintptr_t ra,
2599                                 MemoryRegion *mr, hwaddr mr_offset)
2600 {
2601     do {
2602         MemOp this_mop;
2603         unsigned this_size;
2604         MemTxResult r;
2605 
2606         /* Store aligned pieces up to 8 bytes. */
2607         this_mop = ctz32(size | (int)addr | 8);
2608         this_size = 1 << this_mop;
2609         this_mop |= MO_LE;
2610 
2611         r = memory_region_dispatch_write(mr, mr_offset, val_le,
2612                                          this_mop, full->attrs);
2613         if (unlikely(r != MEMTX_OK)) {
2614             io_failed(cpu, full, addr, this_size, MMU_DATA_STORE,
2615                       mmu_idx, r, ra);
2616         }
2617         if (this_size == 8) {
2618             return 0;
2619         }
2620 
2621         val_le >>= this_size * 8;
2622         addr += this_size;
2623         mr_offset += this_size;
2624         size -= this_size;
2625     } while (size);
2626 
2627     return val_le;
2628 }
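
/*
 * Illustrative sketch, not part of this file: the little-endian consume
 * pattern above.  After storing the low 3 bytes of 0x8877665544332211, the
 * value still to be stored is 0x0000008877665544.  The helper below is
 * hypothetical.
 */
static inline uint64_t example_consume_le(uint64_t val_le, int stored_bytes)
{
    return val_le >> (stored_bytes * 8);
}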
2629 
2630 static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
2631                                uint64_t val_le, vaddr addr, int size,
2632                                int mmu_idx, uintptr_t ra)
2633 {
2634     MemoryRegionSection *section;
2635     hwaddr mr_offset;
2636     MemoryRegion *mr;
2637     MemTxAttrs attrs;
2638 
2639     tcg_debug_assert(size > 0 && size <= 8);
2640 
2641     attrs = full->attrs;
2642     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2643     mr = section->mr;
2644 
2645     BQL_LOCK_GUARD();
2646     return int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
2647                            ra, mr, mr_offset);
2648 }
2649 
2650 static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
2651                                  Int128 val_le, vaddr addr, int size,
2652                                  int mmu_idx, uintptr_t ra)
2653 {
2654     MemoryRegionSection *section;
2655     MemoryRegion *mr;
2656     hwaddr mr_offset;
2657     MemTxAttrs attrs;
2658 
2659     tcg_debug_assert(size > 8 && size <= 16);
2660 
2661     attrs = full->attrs;
2662     section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
2663     mr = section->mr;
2664 
2665     BQL_LOCK_GUARD();
2666     int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
2667                     mmu_idx, ra, mr, mr_offset);
2668     return int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
2669                            size - 8, mmu_idx, ra, mr, mr_offset + 8);
2670 }
2671 
2672 /*
2673  * Wrapper for the above.
2674  */
2675 static uint64_t do_st_leN(CPUState *cpu, MMULookupPageData *p,
2676                           uint64_t val_le, int mmu_idx,
2677                           MemOp mop, uintptr_t ra)
2678 {
2679     MemOp atom;
2680     unsigned tmp, half_size;
2681 
2682     if (unlikely(p->flags & TLB_MMIO)) {
2683         return do_st_mmio_leN(cpu, p->full, val_le, p->addr,
2684                               p->size, mmu_idx, ra);
2685     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2686         return val_le >> (p->size * 8);
2687     }
2688 
2689     /*
2690      * It is a given that we cross a page and therefore there is no atomicity
2691      * for the store as a whole, but subobjects may need attention.
2692      */
2693     atom = mop & MO_ATOM_MASK;
2694     switch (atom) {
2695     case MO_ATOM_SUBALIGN:
2696         return store_parts_leN(p->haddr, p->size, val_le);
2697 
2698     case MO_ATOM_IFALIGN_PAIR:
2699     case MO_ATOM_WITHIN16_PAIR:
2700         tmp = mop & MO_SIZE;
2701         tmp = tmp ? tmp - 1 : 0;
2702         half_size = 1 << tmp;
2703         if (atom == MO_ATOM_IFALIGN_PAIR
2704             ? p->size == half_size
2705             : p->size >= half_size) {
2706             if (!HAVE_al8_fast && p->size <= 4) {
2707                 return store_whole_le4(p->haddr, p->size, val_le);
2708             } else if (HAVE_al8) {
2709                 return store_whole_le8(p->haddr, p->size, val_le);
2710             } else {
2711                 cpu_loop_exit_atomic(cpu, ra);
2712             }
2713         }
2714         /* fall through */
2715 
2716     case MO_ATOM_IFALIGN:
2717     case MO_ATOM_WITHIN16:
2718     case MO_ATOM_NONE:
2719         return store_bytes_leN(p->haddr, p->size, val_le);
2720 
2721     default:
2722         g_assert_not_reached();
2723     }
2724 }
2725 
2726 /*
2727  * Wrapper for the above, for 8 < size < 16.
2728  */
2729 static uint64_t do_st16_leN(CPUState *cpu, MMULookupPageData *p,
2730                             Int128 val_le, int mmu_idx,
2731                             MemOp mop, uintptr_t ra)
2732 {
2733     int size = p->size;
2734     MemOp atom;
2735 
2736     if (unlikely(p->flags & TLB_MMIO)) {
2737         return do_st16_mmio_leN(cpu, p->full, val_le, p->addr,
2738                                 size, mmu_idx, ra);
2739     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2740         return int128_gethi(val_le) >> ((size - 8) * 8);
2741     }
2742 
2743     /*
2744      * It is a given that we cross a page and therefore there is no atomicity
2745      * for the store as a whole, but subobjects may need attention.
2746      */
2747     atom = mop & MO_ATOM_MASK;
2748     switch (atom) {
2749     case MO_ATOM_SUBALIGN:
2750         store_parts_leN(p->haddr, 8, int128_getlo(val_le));
2751         return store_parts_leN(p->haddr + 8, p->size - 8,
2752                                int128_gethi(val_le));
2753 
2754     case MO_ATOM_WITHIN16_PAIR:
2755         /* Since size > 8, this is the half that must be atomic. */
2756         if (!HAVE_CMPXCHG128) {
2757             cpu_loop_exit_atomic(cpu, ra);
2758         }
2759         return store_whole_le16(p->haddr, p->size, val_le);
2760 
2761     case MO_ATOM_IFALIGN_PAIR:
2762         /*
2763          * Since size > 8, both halves are misaligned,
2764          * and so neither is atomic.
2765          */
2766     case MO_ATOM_IFALIGN:
2767     case MO_ATOM_WITHIN16:
2768     case MO_ATOM_NONE:
2769         stq_le_p(p->haddr, int128_getlo(val_le));
2770         return store_bytes_leN(p->haddr + 8, p->size - 8,
2771                                int128_gethi(val_le));
2772 
2773     default:
2774         g_assert_not_reached();
2775     }
2776 }
2777 
2778 static void do_st_1(CPUState *cpu, MMULookupPageData *p, uint8_t val,
2779                     int mmu_idx, uintptr_t ra)
2780 {
2781     if (unlikely(p->flags & TLB_MMIO)) {
2782         do_st_mmio_leN(cpu, p->full, val, p->addr, 1, mmu_idx, ra);
2783     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2784         /* nothing */
2785     } else {
2786         *(uint8_t *)p->haddr = val;
2787     }
2788 }
2789 
2790 static void do_st_2(CPUState *cpu, MMULookupPageData *p, uint16_t val,
2791                     int mmu_idx, MemOp memop, uintptr_t ra)
2792 {
2793     if (unlikely(p->flags & TLB_MMIO)) {
2794         if ((memop & MO_BSWAP) != MO_LE) {
2795             val = bswap16(val);
2796         }
2797         do_st_mmio_leN(cpu, p->full, val, p->addr, 2, mmu_idx, ra);
2798     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2799         /* nothing */
2800     } else {
2801         /* Swap to host endian if necessary, then store. */
2802         if (memop & MO_BSWAP) {
2803             val = bswap16(val);
2804         }
2805         store_atom_2(cpu, ra, p->haddr, memop, val);
2806     }
2807 }
2808 
2809 static void do_st_4(CPUState *cpu, MMULookupPageData *p, uint32_t val,
2810                     int mmu_idx, MemOp memop, uintptr_t ra)
2811 {
2812     if (unlikely(p->flags & TLB_MMIO)) {
2813         if ((memop & MO_BSWAP) != MO_LE) {
2814             val = bswap32(val);
2815         }
2816         do_st_mmio_leN(cpu, p->full, val, p->addr, 4, mmu_idx, ra);
2817     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2818         /* nothing */
2819     } else {
2820         /* Swap to host endian if necessary, then store. */
2821         if (memop & MO_BSWAP) {
2822             val = bswap32(val);
2823         }
2824         store_atom_4(cpu, ra, p->haddr, memop, val);
2825     }
2826 }
2827 
2828 static void do_st_8(CPUState *cpu, MMULookupPageData *p, uint64_t val,
2829                     int mmu_idx, MemOp memop, uintptr_t ra)
2830 {
2831     if (unlikely(p->flags & TLB_MMIO)) {
2832         if ((memop & MO_BSWAP) != MO_LE) {
2833             val = bswap64(val);
2834         }
2835         do_st_mmio_leN(cpu, p->full, val, p->addr, 8, mmu_idx, ra);
2836     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2837         /* nothing */
2838     } else {
2839         /* Swap to host endian if necessary, then store. */
2840         if (memop & MO_BSWAP) {
2841             val = bswap64(val);
2842         }
2843         store_atom_8(cpu, ra, p->haddr, memop, val);
2844     }
2845 }
2846 
2847 static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
2848                        MemOpIdx oi, uintptr_t ra)
2849 {
2850     MMULookupLocals l;
2851     bool crosspage;
2852 
2853     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2854     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2855     tcg_debug_assert(!crosspage);
2856 
2857     do_st_1(cpu, &l.page[0], val, l.mmu_idx, ra);
2858 }
2859 
2860 static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
2861                        MemOpIdx oi, uintptr_t ra)
2862 {
2863     MMULookupLocals l;
2864     bool crosspage;
2865     uint8_t a, b;
2866 
2867     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2868     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2869     if (likely(!crosspage)) {
2870         do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2871         return;
2872     }
2873 
2874     if ((l.memop & MO_BSWAP) == MO_LE) {
2875         a = val, b = val >> 8;
2876     } else {
2877         b = val, a = val >> 8;
2878     }
2879     do_st_1(cpu, &l.page[0], a, l.mmu_idx, ra);
2880     do_st_1(cpu, &l.page[1], b, l.mmu_idx, ra);
2881 }
2882 
2883 static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
2884                        MemOpIdx oi, uintptr_t ra)
2885 {
2886     MMULookupLocals l;
2887     bool crosspage;
2888 
2889     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2890     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2891     if (likely(!crosspage)) {
2892         do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2893         return;
2894     }
2895 
2896     /* Swap to little endian for simplicity, then store by bytes. */
2897     if ((l.memop & MO_BSWAP) != MO_LE) {
2898         val = bswap32(val);
2899     }
2900     val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2901     (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2902 }
2903 
2904 static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
2905                        MemOpIdx oi, uintptr_t ra)
2906 {
2907     MMULookupLocals l;
2908     bool crosspage;
2909 
2910     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2911     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2912     if (likely(!crosspage)) {
2913         do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2914         return;
2915     }
2916 
2917     /* Swap to little endian for simplicity, then store by bytes. */
2918     if ((l.memop & MO_BSWAP) != MO_LE) {
2919         val = bswap64(val);
2920     }
2921     val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2922     (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2923 }
2924 
2925 static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
2926                         MemOpIdx oi, uintptr_t ra)
2927 {
2928     MMULookupLocals l;
2929     bool crosspage;
2930     uint64_t a, b;
2931     int first;
2932 
2933     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2934     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
2935     if (likely(!crosspage)) {
2936         if (unlikely(l.page[0].flags & TLB_MMIO)) {
2937             if ((l.memop & MO_BSWAP) != MO_LE) {
2938                 val = bswap128(val);
2939             }
2940             do_st16_mmio_leN(cpu, l.page[0].full, val, addr, 16, l.mmu_idx, ra);
2941         } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
2942             /* nothing */
2943         } else {
2944             /* Swap to host endian if necessary, then store. */
2945             if (l.memop & MO_BSWAP) {
2946                 val = bswap128(val);
2947             }
2948             store_atom_16(cpu, ra, l.page[0].haddr, l.memop, val);
2949         }
2950         return;
2951     }
2952 
2953     first = l.page[0].size;
2954     if (first == 8) {
2955         MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64;
2956 
2957         if (l.memop & MO_BSWAP) {
2958             val = bswap128(val);
2959         }
2960         if (HOST_BIG_ENDIAN) {
2961             b = int128_getlo(val), a = int128_gethi(val);
2962         } else {
2963             a = int128_getlo(val), b = int128_gethi(val);
2964         }
2965         do_st_8(cpu, &l.page[0], a, l.mmu_idx, mop8, ra);
2966         do_st_8(cpu, &l.page[1], b, l.mmu_idx, mop8, ra);
2967         return;
2968     }
2969 
2970     if ((l.memop & MO_BSWAP) != MO_LE) {
2971         val = bswap128(val);
2972     }
2973     if (first < 8) {
2974         do_st_leN(cpu, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
2975         val = int128_urshift(val, first * 8);
2976         do_st16_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
2977     } else {
2978         b = do_st16_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
2979         do_st_leN(cpu, &l.page[1], b, l.mmu_idx, l.memop, ra);
2980     }
2981 }
2982 
2983 #include "ldst_common.c.inc"
2984 
2985 /*
2986  * First set of functions passes in OI and RETADDR.
2987  * This makes them callable from other helpers.
2988  */
2989 
2990 #define ATOMIC_NAME(X) \
2991     glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
2992 
2993 #define ATOMIC_MMU_CLEANUP
2994 
2995 #include "atomic_common.c.inc"
2996 
2997 #define DATA_SIZE 1
2998 #include "atomic_template.h"
2999 
3000 #define DATA_SIZE 2
3001 #include "atomic_template.h"
3002 
3003 #define DATA_SIZE 4
3004 #include "atomic_template.h"
3005 
3006 #ifdef CONFIG_ATOMIC64
3007 #define DATA_SIZE 8
3008 #include "atomic_template.h"
3009 #endif
3010 
3011 #if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
3012 #define DATA_SIZE 16
3013 #include "atomic_template.h"
3014 #endif
3015 
3016 /* Code access functions.  */
3017 
3018 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
3019 {
3020     CPUState *cs = env_cpu(env);
3021     MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(cs, true));
3022     return do_ld1_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
3023 }
3024 
3025 uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
3026 {
3027     CPUState *cs = env_cpu(env);
3028     MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(cs, true));
3029     return do_ld2_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
3030 }
3031 
3032 uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
3033 {
3034     CPUState *cs = env_cpu(env);
3035     MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(cs, true));
3036     return do_ld4_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
3037 }
3038 
3039 uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
3040 {
3041     CPUState *cs = env_cpu(env);
3042     MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(cs, true));
3043     return do_ld8_mmu(cs, addr, oi, 0, MMU_INST_FETCH);
3044 }
3045 
3046 uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
3047                          MemOpIdx oi, uintptr_t retaddr)
3048 {
3049     return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
3050 }
3051 
3052 uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
3053                           MemOpIdx oi, uintptr_t retaddr)
3054 {
3055     return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
3056 }
3057 
3058 uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
3059                           MemOpIdx oi, uintptr_t retaddr)
3060 {
3061     return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
3062 }
3063 
3064 uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
3065                           MemOpIdx oi, uintptr_t retaddr)
3066 {
3067     return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
3068 }
3069