xref: /openbmc/qemu/accel/tcg/cputlb.c (revision abf7ba31)
1 /*
2  *  Common CPU TLB handling
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/main-loop.h"
22 #include "hw/core/tcg-cpu-ops.h"
23 #include "exec/exec-all.h"
24 #include "exec/memory.h"
25 #include "exec/cpu_ldst.h"
26 #include "exec/cputlb.h"
27 #include "exec/memory-internal.h"
28 #include "exec/ram_addr.h"
29 #include "tcg/tcg.h"
30 #include "qemu/error-report.h"
31 #include "exec/log.h"
32 #include "exec/helper-proto-common.h"
33 #include "qemu/atomic.h"
34 #include "qemu/atomic128.h"
35 #include "exec/translate-all.h"
36 #include "trace.h"
37 #include "tb-hash.h"
38 #include "internal.h"
39 #ifdef CONFIG_PLUGIN
40 #include "qemu/plugin-memory.h"
41 #endif
42 #include "tcg/tcg-ldst.h"
43 #include "tcg/oversized-guest.h"
44 
45 /* DEBUG defines; enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
46 /* #define DEBUG_TLB */
47 /* #define DEBUG_TLB_LOG */
48 
49 #ifdef DEBUG_TLB
50 # define DEBUG_TLB_GATE 1
51 # ifdef DEBUG_TLB_LOG
52 #  define DEBUG_TLB_LOG_GATE 1
53 # else
54 #  define DEBUG_TLB_LOG_GATE 0
55 # endif
56 #else
57 # define DEBUG_TLB_GATE 0
58 # define DEBUG_TLB_LOG_GATE 0
59 #endif
60 
61 #define tlb_debug(fmt, ...) do { \
62     if (DEBUG_TLB_LOG_GATE) { \
63         qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
64                       ## __VA_ARGS__); \
65     } else if (DEBUG_TLB_GATE) { \
66         fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
67     } \
68 } while (0)
69 
70 #define assert_cpu_is_self(cpu) do {                              \
71         if (DEBUG_TLB_GATE) {                                     \
72             g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
73         }                                                         \
74     } while (0)
75 
76 /* run_on_cpu_data.target_ptr should always be big enough for a
77  * target_ulong even on 32-bit builds */
78 QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
79 
80 /* We currently can't handle more than 16 bits in the MMUIDX bitmask.
81  */
82 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
83 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
84 
85 static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
86 {
87     return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
88 }
89 
90 static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
91 {
92     return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
93 }
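
/*
 * For illustration, assuming a hypothetical CPU_TLB_ENTRY_BITS of 5
 * (i.e. a 32-byte CPUTLBEntry) and a 256-entry table:
 *
 *     fast->mask      == (256 - 1) << 5 == 0x1fe0
 *     tlb_n_entries() == (0x1fe0 >> 5) + 1 == 256
 *     sizeof_tlb()    == 0x1fe0 + (1 << 5) == 8192 == 256 * 32 bytes
 *
 * The real CPU_TLB_ENTRY_BITS value is configuration dependent.
 */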
94 
95 static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
96                              size_t max_entries)
97 {
98     desc->window_begin_ns = ns;
99     desc->window_max_entries = max_entries;
100 }
101 
102 static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
103 {
104     CPUJumpCache *jc = cpu->tb_jmp_cache;
105     int i, i0;
106 
107     if (unlikely(!jc)) {
108         return;
109     }
110 
111     i0 = tb_jmp_cache_hash_page(page_addr);
112     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
113         qatomic_set(&jc->array[i0 + i].tb, NULL);
114     }
115 }
116 
117 /**
118  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
119  * @desc: The CPUTLBDesc portion of the TLB
120  * @fast: The CPUTLBDescFast portion of the same TLB
121  *
122  * Called with tlb_lock held.
123  *
124  * We have two main constraints when resizing a TLB: (1) we only resize it
125  * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
126  * the array or unnecessarily flushing it), which means we do not control how
127  * frequently the resizing can occur; (2) we don't have access to the guest's
128  * future scheduling decisions, and therefore have to decide the magnitude of
129  * the resize based on past observations.
130  *
131  * In general, a memory-hungry process can benefit greatly from an appropriately
132  * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
133  * we just have to make the TLB as large as possible; while an oversized TLB
134  * results in minimal TLB miss rates, it also takes longer to be flushed
135  * (flushes can be _very_ frequent), and the reduced locality can also hurt
136  * performance.
137  *
138  * To achieve near-optimal performance for all kinds of workloads, we:
139  *
140  * 1. Aggressively increase the size of the TLB when the use rate of the
141  * TLB being flushed is high, since it is likely that in the near future this
142  * memory-hungry process will execute again, and its memory hungriness will
143  * probably be similar.
144  *
145  * 2. Slowly reduce the size of the TLB as the use rate declines over a
146  * reasonably large time window. The rationale is that if in such a time window
147  * we have not observed a high TLB use rate, it is likely that we won't observe
148  * it in the near future. In that case, once a time window expires we downsize
149  * the TLB to match the maximum use rate observed in the window.
150  *
151  * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
152  * since in that range performance is likely near-optimal. Recall that the TLB
153  * is direct mapped, so we want the use rate to be low (or at least not too
154  * high), since otherwise we are likely to have a significant amount of
155  * conflict misses.
156  */
157 static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
158                                   int64_t now)
159 {
160     size_t old_size = tlb_n_entries(fast);
161     size_t rate;
162     size_t new_size = old_size;
163     int64_t window_len_ms = 100;
164     int64_t window_len_ns = window_len_ms * 1000 * 1000;
165     bool window_expired = now > desc->window_begin_ns + window_len_ns;
166 
167     if (desc->n_used_entries > desc->window_max_entries) {
168         desc->window_max_entries = desc->n_used_entries;
169     }
170     rate = desc->window_max_entries * 100 / old_size;
171 
172     if (rate > 70) {
173         new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
174     } else if (rate < 30 && window_expired) {
175         size_t ceil = pow2ceil(desc->window_max_entries);
176         size_t expected_rate = desc->window_max_entries * 100 / ceil;
177 
178         /*
179          * Avoid undersizing when the max number of entries seen is just below
180          * a pow2. For instance, if max_entries == 1025, the expected use rate
181          * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
182          * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
183          * later. Thus, make sure that the expected use rate remains below 70%.
184          * (and since we double the size, that means the lowest rate we'd
185          * expect to get is 35%, which is still in the 30-70% range where
186          * we consider that the size is appropriate.)
187          */
188         if (expected_rate > 70) {
189             ceil *= 2;
190         }
191         new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
192     }
193 
194     if (new_size == old_size) {
195         if (window_expired) {
196             tlb_window_reset(desc, now, desc->n_used_entries);
197         }
198         return;
199     }
200 
201     g_free(fast->table);
202     g_free(desc->fulltlb);
203 
204     tlb_window_reset(desc, now, 0);
205     /* desc->n_used_entries is cleared by the caller */
206     fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
207     fast->table = g_try_new(CPUTLBEntry, new_size);
208     desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
209 
210     /*
211      * If the allocations fail, try smaller sizes. We just freed some
212      * memory, so going back to half of new_size has a good chance of working.
213      * Increased memory pressure elsewhere in the system might cause the
214      * allocations to fail though, so we progressively reduce the allocation
215      * size, aborting if we cannot even allocate the smallest TLB we support.
216      */
217     while (fast->table == NULL || desc->fulltlb == NULL) {
218         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
219             error_report("%s: %s", __func__, strerror(errno));
220             abort();
221         }
222         new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
223         fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
224 
225         g_free(fast->table);
226         g_free(desc->fulltlb);
227         fast->table = g_try_new(CPUTLBEntry, new_size);
228         desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
229     }
230 }
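
/*
 * A worked example of the heuristic above, with hypothetical numbers and
 * old_size == 1024 entries:
 *
 *   - window_max_entries == 800: rate == 800 * 100 / 1024 == 78 > 70, so
 *     the table doubles to 2048 (capped at 1 << CPU_TLB_DYN_MAX_BITS).
 *
 *   - window_max_entries == 200 and the window has expired: rate == 19 < 30,
 *     ceil == pow2ceil(200) == 256, expected_rate == 200 * 100 / 256 == 78,
 *     which is > 70, so ceil is doubled to 512 and new_size becomes
 *     MAX(512, 1 << CPU_TLB_DYN_MIN_BITS).
 */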
231 
232 static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
233 {
234     desc->n_used_entries = 0;
235     desc->large_page_addr = -1;
236     desc->large_page_mask = -1;
237     desc->vindex = 0;
238     memset(fast->table, -1, sizeof_tlb(fast));
239     memset(desc->vtable, -1, sizeof(desc->vtable));
240 }
241 
242 static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
243                                         int64_t now)
244 {
245     CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
246     CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
247 
248     tlb_mmu_resize_locked(desc, fast, now);
249     tlb_mmu_flush_locked(desc, fast);
250 }
251 
252 static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
253 {
254     size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
255 
256     tlb_window_reset(desc, now, 0);
257     desc->n_used_entries = 0;
258     fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
259     fast->table = g_new(CPUTLBEntry, n_entries);
260     desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
261     tlb_mmu_flush_locked(desc, fast);
262 }
263 
264 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
265 {
266     env_tlb(env)->d[mmu_idx].n_used_entries++;
267 }
268 
269 static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
270 {
271     env_tlb(env)->d[mmu_idx].n_used_entries--;
272 }
273 
274 void tlb_init(CPUState *cpu)
275 {
276     CPUArchState *env = cpu->env_ptr;
277     int64_t now = get_clock_realtime();
278     int i;
279 
280     qemu_spin_init(&env_tlb(env)->c.lock);
281 
282     /* All TLBs are initialized flushed. */
283     env_tlb(env)->c.dirty = 0;
284 
285     for (i = 0; i < NB_MMU_MODES; i++) {
286         tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
287     }
288 }
289 
290 void tlb_destroy(CPUState *cpu)
291 {
292     CPUArchState *env = cpu->env_ptr;
293     int i;
294 
295     qemu_spin_destroy(&env_tlb(env)->c.lock);
296     for (i = 0; i < NB_MMU_MODES; i++) {
297         CPUTLBDesc *desc = &env_tlb(env)->d[i];
298         CPUTLBDescFast *fast = &env_tlb(env)->f[i];
299 
300         g_free(fast->table);
301         g_free(desc->fulltlb);
302     }
303 }
304 
305 /* flush_all_helper: run fn across all cpus other than src
306  *
307  * Queue fn as asynchronous work on every cpu other than src.  Callers
308  * that need a synchronisation point additionally queue fn on src as
309  * "safe" work (see the *_synced variants), so that all queued work is
310  * finished before execution starts again.
311  */
312 static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
313                              run_on_cpu_data d)
314 {
315     CPUState *cpu;
316 
317     CPU_FOREACH(cpu) {
318         if (cpu != src) {
319             async_run_on_cpu(cpu, fn, d);
320         }
321     }
322 }
323 
324 void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
325 {
326     CPUState *cpu;
327     size_t full = 0, part = 0, elide = 0;
328 
329     CPU_FOREACH(cpu) {
330         CPUArchState *env = cpu->env_ptr;
331 
332         full += qatomic_read(&env_tlb(env)->c.full_flush_count);
333         part += qatomic_read(&env_tlb(env)->c.part_flush_count);
334         elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
335     }
336     *pfull = full;
337     *ppart = part;
338     *pelide = elide;
339 }
340 
341 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
342 {
343     CPUArchState *env = cpu->env_ptr;
344     uint16_t asked = data.host_int;
345     uint16_t all_dirty, work, to_clean;
346     int64_t now = get_clock_realtime();
347 
348     assert_cpu_is_self(cpu);
349 
350     tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
351 
352     qemu_spin_lock(&env_tlb(env)->c.lock);
353 
354     all_dirty = env_tlb(env)->c.dirty;
355     to_clean = asked & all_dirty;
356     all_dirty &= ~to_clean;
357     env_tlb(env)->c.dirty = all_dirty;
358 
359     for (work = to_clean; work != 0; work &= work - 1) {
360         int mmu_idx = ctz32(work);
361         tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
362     }
363 
364     qemu_spin_unlock(&env_tlb(env)->c.lock);
365 
366     tcg_flush_jmp_cache(cpu);
367 
368     if (to_clean == ALL_MMUIDX_BITS) {
369         qatomic_set(&env_tlb(env)->c.full_flush_count,
370                    env_tlb(env)->c.full_flush_count + 1);
371     } else {
372         qatomic_set(&env_tlb(env)->c.part_flush_count,
373                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
374         if (to_clean != asked) {
375             qatomic_set(&env_tlb(env)->c.elide_flush_count,
376                        env_tlb(env)->c.elide_flush_count +
377                        ctpop16(asked & ~to_clean));
378         }
379     }
380 }
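
/*
 * A worked example of the bookkeeping above, with a hypothetical idxmap:
 * if asked == 0b0101 (mmu_idx 0 and 2) and c.dirty == 0b0011 (mmu_idx 0
 * and 1 used since their last flush), then to_clean == 0b0001, c.dirty is
 * left as 0b0010, and the loop flushes only mmu_idx 0 ("work &= work - 1"
 * clears the lowest set bit on each iteration).  Because to_clean != asked,
 * the flush of the already-clean mmu_idx 2 is counted as elided
 * (ctpop16(asked & ~to_clean) == 1).
 */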
381 
382 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
383 {
384     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
385 
386     if (cpu->created && !qemu_cpu_is_self(cpu)) {
387         async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
388                          RUN_ON_CPU_HOST_INT(idxmap));
389     } else {
390         tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
391     }
392 }
393 
394 void tlb_flush(CPUState *cpu)
395 {
396     tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
397 }
398 
399 void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
400 {
401     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
402 
403     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
404 
405     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
406     fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
407 }
408 
409 void tlb_flush_all_cpus(CPUState *src_cpu)
410 {
411     tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
412 }
413 
414 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
415 {
416     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
417 
418     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
419 
420     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
421     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
422 }
423 
424 void tlb_flush_all_cpus_synced(CPUState *src_cpu)
425 {
426     tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
427 }
428 
429 static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
430                                       vaddr page, vaddr mask)
431 {
432     page &= mask;
433     mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
434 
435     return (page == (tlb_entry->addr_read & mask) ||
436             page == (tlb_addr_write(tlb_entry) & mask) ||
437             page == (tlb_entry->addr_code & mask));
438 }
439 
440 static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
441 {
442     return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
443 }
444 
445 /**
446  * tlb_entry_is_empty - return true if the entry is not in use
447  * @te: pointer to CPUTLBEntry
448  */
449 static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
450 {
451     return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
452 }
453 
454 /* Called with tlb_c.lock held */
455 static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
456                                         vaddr page,
457                                         vaddr mask)
458 {
459     if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
460         memset(tlb_entry, -1, sizeof(*tlb_entry));
461         return true;
462     }
463     return false;
464 }
465 
466 static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page)
467 {
468     return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
469 }
470 
471 /* Called with tlb_c.lock held */
472 static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
473                                             vaddr page,
474                                             vaddr mask)
475 {
476     CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
477     int k;
478 
479     assert_cpu_is_self(env_cpu(env));
480     for (k = 0; k < CPU_VTLB_SIZE; k++) {
481         if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
482             tlb_n_used_entries_dec(env, mmu_idx);
483         }
484     }
485 }
486 
487 static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
488                                               vaddr page)
489 {
490     tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
491 }
492 
493 static void tlb_flush_page_locked(CPUArchState *env, int midx, vaddr page)
494 {
495     vaddr lp_addr = env_tlb(env)->d[midx].large_page_addr;
496     vaddr lp_mask = env_tlb(env)->d[midx].large_page_mask;
497 
498     /* Check if we need to flush due to large pages.  */
499     if ((page & lp_mask) == lp_addr) {
500         tlb_debug("forcing full flush midx %d (%"
501                   VADDR_PRIx "/%" VADDR_PRIx ")\n",
502                   midx, lp_addr, lp_mask);
503         tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
504     } else {
505         if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
506             tlb_n_used_entries_dec(env, midx);
507         }
508         tlb_flush_vtlb_page_locked(env, midx, page);
509     }
510 }
511 
512 /**
513  * tlb_flush_page_by_mmuidx_async_0:
514  * @cpu: cpu on which to flush
515  * @addr: page of virtual address to flush
516  * @idxmap: set of mmu_idx to flush
517  *
518  * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
519  * at @addr from the tlbs indicated by @idxmap from @cpu.
520  */
521 static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
522                                              vaddr addr,
523                                              uint16_t idxmap)
524 {
525     CPUArchState *env = cpu->env_ptr;
526     int mmu_idx;
527 
528     assert_cpu_is_self(cpu);
529 
530     tlb_debug("page addr: %" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);
531 
532     qemu_spin_lock(&env_tlb(env)->c.lock);
533     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
534         if ((idxmap >> mmu_idx) & 1) {
535             tlb_flush_page_locked(env, mmu_idx, addr);
536         }
537     }
538     qemu_spin_unlock(&env_tlb(env)->c.lock);
539 
540     /*
541      * Discard jump cache entries for any tb which might potentially
542      * overlap the flushed page, which includes the previous.
543      */
544     tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
545     tb_jmp_cache_clear_page(cpu, addr);
546 }
547 
548 /**
549  * tlb_flush_page_by_mmuidx_async_1:
550  * @cpu: cpu on which to flush
551  * @data: encoded addr + idxmap
552  *
553  * Helper for tlb_flush_page_by_mmuidx and friends, called through
554  * async_run_on_cpu.  The idxmap parameter is encoded in the page
555  * offset of the target_ptr field.  This limits the set of mmu_idx
556  * that can be passed via this method.
557  */
558 static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
559                                              run_on_cpu_data data)
560 {
561     vaddr addr_and_idxmap = data.target_ptr;
562     vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
563     uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
564 
565     tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
566 }
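
/*
 * An illustrative encoding, assuming a hypothetical TARGET_PAGE_BITS of 12:
 * for addr == 0x7f001000 and idxmap == 0x3 the encoded target_ptr is
 * 0x7f001003, and the decode above recovers addr == 0x7f001000 and
 * idxmap == 0x3.  The "idxmap < TARGET_PAGE_SIZE" check in the callers
 * guarantees that the bitmap fits in the page offset.
 */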
567 
568 typedef struct {
569     vaddr addr;
570     uint16_t idxmap;
571 } TLBFlushPageByMMUIdxData;
572 
573 /**
574  * tlb_flush_page_by_mmuidx_async_2:
575  * @cpu: cpu on which to flush
576  * @data: allocated addr + idxmap
577  *
578  * Helper for tlb_flush_page_by_mmuidx and friends, called through
579  * async_run_on_cpu.  The addr+idxmap parameters are stored in a
580  * TLBFlushPageByMMUIdxData structure that has been allocated
581  * specifically for this helper.  Free the structure when done.
582  */
583 static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
584                                              run_on_cpu_data data)
585 {
586     TLBFlushPageByMMUIdxData *d = data.host_ptr;
587 
588     tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
589     g_free(d);
590 }
591 
592 void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
593 {
594     tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
595 
596     /* This should already be page aligned */
597     addr &= TARGET_PAGE_MASK;
598 
599     if (qemu_cpu_is_self(cpu)) {
600         tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
601     } else if (idxmap < TARGET_PAGE_SIZE) {
602         /*
603          * Most targets have only a few mmu_idx.  In the case where
604          * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
605          * allocating memory for this operation.
606          */
607         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
608                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
609     } else {
610         TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
611 
612         /* Otherwise allocate a structure, freed by the worker.  */
613         d->addr = addr;
614         d->idxmap = idxmap;
615         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
616                          RUN_ON_CPU_HOST_PTR(d));
617     }
618 }
619 
620 void tlb_flush_page(CPUState *cpu, vaddr addr)
621 {
622     tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
623 }
624 
625 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
626                                        uint16_t idxmap)
627 {
628     tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
629 
630     /* This should already be page aligned */
631     addr &= TARGET_PAGE_MASK;
632 
633     /*
634      * Allocate memory to hold addr+idxmap only when needed.
635      * See tlb_flush_page_by_mmuidx for details.
636      */
637     if (idxmap < TARGET_PAGE_SIZE) {
638         flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
639                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
640     } else {
641         CPUState *dst_cpu;
642 
643         /* Allocate a separate data block for each destination cpu.  */
644         CPU_FOREACH(dst_cpu) {
645             if (dst_cpu != src_cpu) {
646                 TLBFlushPageByMMUIdxData *d
647                     = g_new(TLBFlushPageByMMUIdxData, 1);
648 
649                 d->addr = addr;
650                 d->idxmap = idxmap;
651                 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
652                                  RUN_ON_CPU_HOST_PTR(d));
653             }
654         }
655     }
656 
657     tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
658 }
659 
660 void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
661 {
662     tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
663 }
664 
665 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
666                                               vaddr addr,
667                                               uint16_t idxmap)
668 {
669     tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
670 
671     /* This should already be page aligned */
672     addr &= TARGET_PAGE_MASK;
673 
674     /*
675      * Allocate memory to hold addr+idxmap only when needed.
676      * See tlb_flush_page_by_mmuidx for details.
677      */
678     if (idxmap < TARGET_PAGE_SIZE) {
679         flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
680                          RUN_ON_CPU_TARGET_PTR(addr | idxmap));
681         async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
682                               RUN_ON_CPU_TARGET_PTR(addr | idxmap));
683     } else {
684         CPUState *dst_cpu;
685         TLBFlushPageByMMUIdxData *d;
686 
687         /* Allocate a separate data block for each destination cpu.  */
688         CPU_FOREACH(dst_cpu) {
689             if (dst_cpu != src_cpu) {
690                 d = g_new(TLBFlushPageByMMUIdxData, 1);
691                 d->addr = addr;
692                 d->idxmap = idxmap;
693                 async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
694                                  RUN_ON_CPU_HOST_PTR(d));
695             }
696         }
697 
698         d = g_new(TLBFlushPageByMMUIdxData, 1);
699         d->addr = addr;
700         d->idxmap = idxmap;
701         async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
702                               RUN_ON_CPU_HOST_PTR(d));
703     }
704 }
705 
706 void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
707 {
708     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
709 }
710 
711 static void tlb_flush_range_locked(CPUArchState *env, int midx,
712                                    vaddr addr, vaddr len,
713                                    unsigned bits)
714 {
715     CPUTLBDesc *d = &env_tlb(env)->d[midx];
716     CPUTLBDescFast *f = &env_tlb(env)->f[midx];
717     vaddr mask = MAKE_64BIT_MASK(0, bits);
718 
719     /*
720      * If @bits is smaller than the tlb size, there may be multiple entries
721      * within the TLB; otherwise all addresses that match under @mask hit
722      * the same TLB entry.
723      * TODO: Perhaps allow bits to be a few bits less than the size.
724      * For now, just flush the entire TLB.
725      *
726      * If @len is larger than the tlb size, then it will take longer to
727      * test all of the entries in the TLB than it will to flush it all.
728      */
729     if (mask < f->mask || len > f->mask) {
730         tlb_debug("forcing full flush midx %d ("
731                   "%" VADDR_PRIx "/%" VADDR_PRIx "+%" VADDR_PRIx ")\n",
732                   midx, addr, mask, len);
733         tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
734         return;
735     }
736 
737     /*
738      * Check if we need to flush due to large pages.
739      * Because large_page_mask contains all 1's from the msb,
740      * we only need to test the end of the range.
741      */
742     if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
743         tlb_debug("forcing full flush midx %d ("
744                   "%" VADDR_PRIx "/%" VADDR_PRIx ")\n",
745                   midx, d->large_page_addr, d->large_page_mask);
746         tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
747         return;
748     }
749 
750     for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
751         vaddr page = addr + i;
752         CPUTLBEntry *entry = tlb_entry(env, midx, page);
753 
754         if (tlb_flush_entry_mask_locked(entry, page, mask)) {
755             tlb_n_used_entries_dec(env, midx);
756         }
757         tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
758     }
759 }
760 
761 typedef struct {
762     vaddr addr;
763     vaddr len;
764     uint16_t idxmap;
765     uint16_t bits;
766 } TLBFlushRangeData;
767 
768 static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
769                                               TLBFlushRangeData d)
770 {
771     CPUArchState *env = cpu->env_ptr;
772     int mmu_idx;
773 
774     assert_cpu_is_self(cpu);
775 
776     tlb_debug("range: %" VADDR_PRIx "/%u+%" VADDR_PRIx " mmu_map:0x%x\n",
777               d.addr, d.bits, d.len, d.idxmap);
778 
779     qemu_spin_lock(&env_tlb(env)->c.lock);
780     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
781         if ((d.idxmap >> mmu_idx) & 1) {
782             tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
783         }
784     }
785     qemu_spin_unlock(&env_tlb(env)->c.lock);
786 
787     /*
788      * If the length is larger than the jump cache size, then it will take
789      * longer to clear each entry individually than it will to clear it all.
790      */
791     if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
792         tcg_flush_jmp_cache(cpu);
793         return;
794     }
795 
796     /*
797      * Discard jump cache entries for any tb which might potentially
798      * overlap the flushed pages, which includes the previous.
799      */
800     d.addr -= TARGET_PAGE_SIZE;
801     for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
802         tb_jmp_cache_clear_page(cpu, d.addr);
803         d.addr += TARGET_PAGE_SIZE;
804     }
805 }
806 
807 static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
808                                               run_on_cpu_data data)
809 {
810     TLBFlushRangeData *d = data.host_ptr;
811     tlb_flush_range_by_mmuidx_async_0(cpu, *d);
812     g_free(d);
813 }
814 
815 void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
816                                vaddr len, uint16_t idxmap,
817                                unsigned bits)
818 {
819     TLBFlushRangeData d;
820 
821     /*
822      * If all bits are significant, and len is small,
823      * this devolves to tlb_flush_page.
824      */
825     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
826         tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
827         return;
828     }
829     /* If no page bits are significant, this devolves to tlb_flush. */
830     if (bits < TARGET_PAGE_BITS) {
831         tlb_flush_by_mmuidx(cpu, idxmap);
832         return;
833     }
834 
835     /* This should already be page aligned */
836     d.addr = addr & TARGET_PAGE_MASK;
837     d.len = len;
838     d.idxmap = idxmap;
839     d.bits = bits;
840 
841     if (qemu_cpu_is_self(cpu)) {
842         tlb_flush_range_by_mmuidx_async_0(cpu, d);
843     } else {
844         /* Otherwise allocate a structure, freed by the worker.  */
845         TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
846         async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
847                          RUN_ON_CPU_HOST_PTR(p));
848     }
849 }
850 
851 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
852                                    uint16_t idxmap, unsigned bits)
853 {
854     tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
855 }
856 
857 void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
858                                         vaddr addr, vaddr len,
859                                         uint16_t idxmap, unsigned bits)
860 {
861     TLBFlushRangeData d;
862     CPUState *dst_cpu;
863 
864     /*
865      * If all bits are significant, and len is small,
866      * this devolves to tlb_flush_page.
867      */
868     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
869         tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
870         return;
871     }
872     /* If no page bits are significant, this devolves to tlb_flush. */
873     if (bits < TARGET_PAGE_BITS) {
874         tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
875         return;
876     }
877 
878     /* This should already be page aligned */
879     d.addr = addr & TARGET_PAGE_MASK;
880     d.len = len;
881     d.idxmap = idxmap;
882     d.bits = bits;
883 
884     /* Allocate a separate data block for each destination cpu.  */
885     CPU_FOREACH(dst_cpu) {
886         if (dst_cpu != src_cpu) {
887             TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
888             async_run_on_cpu(dst_cpu,
889                              tlb_flush_range_by_mmuidx_async_1,
890                              RUN_ON_CPU_HOST_PTR(p));
891         }
892     }
893 
894     tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
895 }
896 
897 void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
898                                             vaddr addr, uint16_t idxmap,
899                                             unsigned bits)
900 {
901     tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
902                                        idxmap, bits);
903 }
904 
905 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
906                                                vaddr addr,
907                                                vaddr len,
908                                                uint16_t idxmap,
909                                                unsigned bits)
910 {
911     TLBFlushRangeData d, *p;
912     CPUState *dst_cpu;
913 
914     /*
915      * If all bits are significant, and len is small,
916      * this devolves to tlb_flush_page.
917      */
918     if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
919         tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
920         return;
921     }
922     /* If no page bits are significant, this devolves to tlb_flush. */
923     if (bits < TARGET_PAGE_BITS) {
924         tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
925         return;
926     }
927 
928     /* This should already be page aligned */
929     d.addr = addr & TARGET_PAGE_MASK;
930     d.len = len;
931     d.idxmap = idxmap;
932     d.bits = bits;
933 
934     /* Allocate a separate data block for each destination cpu.  */
935     CPU_FOREACH(dst_cpu) {
936         if (dst_cpu != src_cpu) {
937             p = g_memdup(&d, sizeof(d));
938             async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
939                              RUN_ON_CPU_HOST_PTR(p));
940         }
941     }
942 
943     p = g_memdup(&d, sizeof(d));
944     async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
945                           RUN_ON_CPU_HOST_PTR(p));
946 }
947 
948 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
949                                                    vaddr addr,
950                                                    uint16_t idxmap,
951                                                    unsigned bits)
952 {
953     tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
954                                               idxmap, bits);
955 }
956 
957 /* update the TLBs so that writes to code in the physical page 'ram_addr'
958    can be detected */
959 void tlb_protect_code(ram_addr_t ram_addr)
960 {
961     cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
962                                              TARGET_PAGE_SIZE,
963                                              DIRTY_MEMORY_CODE);
964 }
965 
966 /* update the TLB so that writes in physical page 'ram_addr' are no longer
967    tested for self-modifying code */
968 void tlb_unprotect_code(ram_addr_t ram_addr)
969 {
970     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
971 }
972 
973 
974 /*
975  * Dirty write flag handling
976  *
977  * When the TCG code writes to a location it looks up the address in
978  * the TLB and uses that data to compute the final address. If any of
979  * the lower bits of the address are set then the slow path is forced.
980  * There are a number of reasons to do this but for normal RAM the
981  * most usual is detecting writes to code regions which may invalidate
982  * generated code.
983  *
984  * Other vCPUs might be reading their TLBs during guest execution, so we update
985  * te->addr_write with qatomic_set. We don't need to worry about this for
986  * oversized guests as MTTCG is disabled for them.
987  *
988  * Called with tlb_c.lock held.
989  */
990 static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
991                                          uintptr_t start, uintptr_t length)
992 {
993     uintptr_t addr = tlb_entry->addr_write;
994 
995     if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
996                  TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
997         addr &= TARGET_PAGE_MASK;
998         addr += tlb_entry->addend;
999         if ((addr - start) < length) {
1000 #if TARGET_LONG_BITS == 32
1001             uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
1002             ptr_write += HOST_BIG_ENDIAN;
1003             qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
1004 #elif TCG_OVERSIZED_GUEST
1005             tlb_entry->addr_write |= TLB_NOTDIRTY;
1006 #else
1007             qatomic_set(&tlb_entry->addr_write,
1008                         tlb_entry->addr_write | TLB_NOTDIRTY);
1009 #endif
1010         }
1011     }
1012 }
1013 
1014 /*
1015  * Called with tlb_c.lock held.
1016  * Called only from the vCPU context, i.e. the TLB's owner thread.
1017  */
1018 static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
1019 {
1020     *d = *s;
1021 }
1022 
1023 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
1024  * the target vCPU).
1025  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
1026  * thing actually updated is the target TLB entry ->addr_write flags.
1027  */
1028 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
1029 {
1030     CPUArchState *env;
1031 
1032     int mmu_idx;
1033 
1034     env = cpu->env_ptr;
1035     qemu_spin_lock(&env_tlb(env)->c.lock);
1036     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1037         unsigned int i;
1038         unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
1039 
1040         for (i = 0; i < n; i++) {
1041             tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
1042                                          start1, length);
1043         }
1044 
1045         for (i = 0; i < CPU_VTLB_SIZE; i++) {
1046             tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
1047                                          start1, length);
1048         }
1049     }
1050     qemu_spin_unlock(&env_tlb(env)->c.lock);
1051 }
1052 
1053 /* Called with tlb_c.lock held */
1054 static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
1055                                          vaddr addr)
1056 {
1057     if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) {
1058         tlb_entry->addr_write = addr;
1059     }
1060 }
1061 
1062 /* update the TLB corresponding to virtual page 'addr' so that writes
1063    to it no longer take the not-dirty slow path */
1064 void tlb_set_dirty(CPUState *cpu, vaddr addr)
1065 {
1066     CPUArchState *env = cpu->env_ptr;
1067     int mmu_idx;
1068 
1069     assert_cpu_is_self(cpu);
1070 
1071     addr &= TARGET_PAGE_MASK;
1072     qemu_spin_lock(&env_tlb(env)->c.lock);
1073     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1074         tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, addr), addr);
1075     }
1076 
1077     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1078         int k;
1079         for (k = 0; k < CPU_VTLB_SIZE; k++) {
1080             tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], addr);
1081         }
1082     }
1083     qemu_spin_unlock(&env_tlb(env)->c.lock);
1084 }
1085 
1086 /* Our TLB does not support large pages, so remember the area covered by
1087    large pages and trigger a full TLB flush if these are invalidated.  */
1088 static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
1089                                vaddr addr, uint64_t size)
1090 {
1091     vaddr lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
1092     vaddr lp_mask = ~(size - 1);
1093 
1094     if (lp_addr == (vaddr)-1) {
1095         /* No previous large page.  */
1096         lp_addr = addr;
1097     } else {
1098         /* Extend the existing region to include the new page.
1099            This is a compromise between unnecessary flushes and
1100            the cost of maintaining a full variable size TLB.  */
1101         lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
1102         while (((lp_addr ^ addr) & lp_mask) != 0) {
1103             lp_mask <<= 1;
1104         }
1105     }
1106     env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
1107     env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
1108 }
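
/*
 * A worked example with hypothetical addresses: suppose the recorded region
 * is a 2MB page at lp_addr == 0x40000000 (lp_mask == 0xffe00000, shown for
 * a 32-bit vaddr) and another 2MB page at addr == 0x40300000 is added.
 * (lp_addr ^ addr) & lp_mask == 0x00200000 != 0, so lp_mask is widened to
 * 0xffc00000, after which the test passes and the tracked region becomes
 * the 4MB-aligned area at 0x40000000 covering both pages.
 */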
1109 
1110 static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
1111                                    target_ulong address, int flags,
1112                                    MMUAccessType access_type, bool enable)
1113 {
1114     if (enable) {
1115         address |= flags & TLB_FLAGS_MASK;
1116         flags &= TLB_SLOW_FLAGS_MASK;
1117         if (flags) {
1118             address |= TLB_FORCE_SLOW;
1119         }
1120     } else {
1121         address = -1;
1122         flags = 0;
1123     }
1124     ent->addr_idx[access_type] = address;
1125     full->slow_flags[access_type] = flags;
1126 }
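
/*
 * In the helper above, flags covered by TLB_FLAGS_MASK are folded directly
 * into the comparator address, while the TLB_SLOW_FLAGS_MASK bits are kept
 * in full->slow_flags[]; TLB_FORCE_SLOW is set in the comparator whenever
 * any slow flag is present, so the fast path knows the slow flags must be
 * consulted.
 */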
1127 
1128 /*
1129  * Add a new TLB entry. At most one entry for a given virtual address
1130  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
1131  * supplied size is only used by tlb_flush_page.
1132  *
1133  * Called from TCG-generated code, which is under an RCU read-side
1134  * critical section.
1135  */
1136 void tlb_set_page_full(CPUState *cpu, int mmu_idx,
1137                        vaddr addr, CPUTLBEntryFull *full)
1138 {
1139     CPUArchState *env = cpu->env_ptr;
1140     CPUTLB *tlb = env_tlb(env);
1141     CPUTLBDesc *desc = &tlb->d[mmu_idx];
1142     MemoryRegionSection *section;
1143     unsigned int index, read_flags, write_flags;
1144     uintptr_t addend;
1145     CPUTLBEntry *te, tn;
1146     hwaddr iotlb, xlat, sz, paddr_page;
1147     vaddr addr_page;
1148     int asidx, wp_flags, prot;
1149     bool is_ram, is_romd;
1150 
1151     assert_cpu_is_self(cpu);
1152 
1153     if (full->lg_page_size <= TARGET_PAGE_BITS) {
1154         sz = TARGET_PAGE_SIZE;
1155     } else {
1156         sz = (hwaddr)1 << full->lg_page_size;
1157         tlb_add_large_page(env, mmu_idx, addr, sz);
1158     }
1159     addr_page = addr & TARGET_PAGE_MASK;
1160     paddr_page = full->phys_addr & TARGET_PAGE_MASK;
1161 
1162     prot = full->prot;
1163     asidx = cpu_asidx_from_attrs(cpu, full->attrs);
1164     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
1165                                                 &xlat, &sz, full->attrs, &prot);
1166     assert(sz >= TARGET_PAGE_SIZE);
1167 
1168     tlb_debug("vaddr=%" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
1169               " prot=%x idx=%d\n",
1170               addr, full->phys_addr, prot, mmu_idx);
1171 
1172     read_flags = 0;
1173     if (full->lg_page_size < TARGET_PAGE_BITS) {
1174         /* Repeat the MMU check and TLB fill on every access.  */
1175         read_flags |= TLB_INVALID_MASK;
1176     }
1177     if (full->attrs.byte_swap) {
1178         read_flags |= TLB_BSWAP;
1179     }
1180 
1181     is_ram = memory_region_is_ram(section->mr);
1182     is_romd = memory_region_is_romd(section->mr);
1183 
1184     if (is_ram || is_romd) {
1185         /* RAM and ROMD both have associated host memory. */
1186         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
1187     } else {
1188         /* I/O does not; force the host address to NULL. */
1189         addend = 0;
1190     }
1191 
1192     write_flags = read_flags;
1193     if (is_ram) {
1194         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
1195         /*
1196          * Computing is_clean is expensive; avoid all that unless
1197          * the page is actually writable.
1198          */
1199         if (prot & PAGE_WRITE) {
1200             if (section->readonly) {
1201                 write_flags |= TLB_DISCARD_WRITE;
1202             } else if (cpu_physical_memory_is_clean(iotlb)) {
1203                 write_flags |= TLB_NOTDIRTY;
1204             }
1205         }
1206     } else {
1207         /* I/O or ROMD */
1208         iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
1209         /*
1210          * Writes to romd devices must go through MMIO to enable write.
1211          * Reads to romd devices go through the ram_ptr found above,
1212          * but of course reads to I/O must go through MMIO.
1213          */
1214         write_flags |= TLB_MMIO;
1215         if (!is_romd) {
1216             read_flags = write_flags;
1217         }
1218     }
1219 
1220     wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
1221                                               TARGET_PAGE_SIZE);
1222 
1223     index = tlb_index(env, mmu_idx, addr_page);
1224     te = tlb_entry(env, mmu_idx, addr_page);
1225 
1226     /*
1227      * Hold the TLB lock for the rest of the function. We could acquire/release
1228      * the lock several times in the function, but it is faster to amortize the
1229      * acquisition cost by acquiring it just once. Note that this leads to
1230      * a longer critical section, but this is not a concern since the TLB lock
1231      * is unlikely to be contended.
1232      */
1233     qemu_spin_lock(&tlb->c.lock);
1234 
1235     /* Note that the tlb is no longer clean.  */
1236     tlb->c.dirty |= 1 << mmu_idx;
1237 
1238     /* Make sure there's no cached translation for the new page.  */
1239     tlb_flush_vtlb_page_locked(env, mmu_idx, addr_page);
1240 
1241     /*
1242      * Only evict the old entry to the victim tlb if it's for a
1243      * different page; otherwise just overwrite the stale data.
1244      */
1245     if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) {
1246         unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
1247         CPUTLBEntry *tv = &desc->vtable[vidx];
1248 
1249         /* Evict the old entry into the victim tlb.  */
1250         copy_tlb_helper_locked(tv, te);
1251         desc->vfulltlb[vidx] = desc->fulltlb[index];
1252         tlb_n_used_entries_dec(env, mmu_idx);
1253     }
1254 
1255     /* refill the tlb */
1256     /*
1257      * At this point iotlb contains a physical section number in the lower
1258      * TARGET_PAGE_BITS, and either
1259      *  + the ram_addr_t of the page base of the target RAM (RAM)
1260      *  + the offset within section->mr of the page base (I/O, ROMD)
1261      * We subtract addr_page (which is page aligned and thus won't
1262      * disturb the low bits) to give an offset which can be added to the
1263      * (non-page-aligned) vaddr of the eventual memory access to get
1264      * the MemoryRegion offset for the access. Note that the vaddr we
1265      * subtract here is that of the page base, and not the same as the
1266      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
1267      */
1268     desc->fulltlb[index] = *full;
1269     full = &desc->fulltlb[index];
1270     full->xlat_section = iotlb - addr_page;
1271     full->phys_addr = paddr_page;
1272 
1273     /* Now calculate the new entry */
1274     tn.addend = addend - addr_page;
1275 
1276     tlb_set_compare(full, &tn, addr_page, read_flags,
1277                     MMU_INST_FETCH, prot & PAGE_EXEC);
1278 
1279     if (wp_flags & BP_MEM_READ) {
1280         read_flags |= TLB_WATCHPOINT;
1281     }
1282     tlb_set_compare(full, &tn, addr_page, read_flags,
1283                     MMU_DATA_LOAD, prot & PAGE_READ);
1284 
1285     if (prot & PAGE_WRITE_INV) {
1286         write_flags |= TLB_INVALID_MASK;
1287     }
1288     if (wp_flags & BP_MEM_WRITE) {
1289         write_flags |= TLB_WATCHPOINT;
1290     }
1291     tlb_set_compare(full, &tn, addr_page, write_flags,
1292                     MMU_DATA_STORE, prot & PAGE_WRITE);
1293 
1294     copy_tlb_helper_locked(te, &tn);
1295     tlb_n_used_entries_inc(env, mmu_idx);
1296     qemu_spin_unlock(&tlb->c.lock);
1297 }
1298 
1299 void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
1300                              hwaddr paddr, MemTxAttrs attrs, int prot,
1301                              int mmu_idx, uint64_t size)
1302 {
1303     CPUTLBEntryFull full = {
1304         .phys_addr = paddr,
1305         .attrs = attrs,
1306         .prot = prot,
1307         .lg_page_size = ctz64(size)
1308     };
1309 
1310     assert(is_power_of_2(size));
1311     tlb_set_page_full(cpu, mmu_idx, addr, &full);
1312 }
1313 
1314 void tlb_set_page(CPUState *cpu, vaddr addr,
1315                   hwaddr paddr, int prot,
1316                   int mmu_idx, uint64_t size)
1317 {
1318     tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
1319                             prot, mmu_idx, size);
1320 }
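
/*
 * A minimal sketch of how a target typically reaches tlb_set_page() from
 * its TCGCPUOps::tlb_fill hook; the walker my_target_walk() and the
 * wrapper my_cpu_tlb_fill() are hypothetical names.
 *
 *     bool my_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
 *                          MMUAccessType access_type, int mmu_idx,
 *                          bool probe, uintptr_t retaddr)
 *     {
 *         hwaddr paddr;
 *         int prot;
 *
 *         if (my_target_walk(cs, addr, access_type, mmu_idx, &paddr, &prot)) {
 *             tlb_set_page(cs, addr & TARGET_PAGE_MASK,
 *                          paddr & TARGET_PAGE_MASK, prot, mmu_idx,
 *                          TARGET_PAGE_SIZE);
 *             return true;
 *         }
 *         if (probe) {
 *             return false;
 *         }
 *         record the fault details for the target, then unwind:
 *         cpu_loop_exit_restore(cs, retaddr);
 *     }
 */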
1321 
1322 /*
1323  * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
1324  * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
1325  * be discarded and looked up again (e.g. via tlb_entry()).
1326  */
1327 static void tlb_fill(CPUState *cpu, vaddr addr, int size,
1328                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1329 {
1330     bool ok;
1331 
1332     /*
1333      * This is not a probe, so only valid return is success; failure
1334      * should result in exception + longjmp to the cpu loop.
1335      */
1336     ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
1337                                     access_type, mmu_idx, false, retaddr);
1338     assert(ok);
1339 }
1340 
1341 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
1342                                         MMUAccessType access_type,
1343                                         int mmu_idx, uintptr_t retaddr)
1344 {
1345     cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
1346                                           mmu_idx, retaddr);
1347 }
1348 
1349 static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
1350                                           vaddr addr, unsigned size,
1351                                           MMUAccessType access_type,
1352                                           int mmu_idx, MemTxAttrs attrs,
1353                                           MemTxResult response,
1354                                           uintptr_t retaddr)
1355 {
1356     CPUClass *cc = CPU_GET_CLASS(cpu);
1357 
1358     if (!cpu->ignore_memory_transaction_failures &&
1359         cc->tcg_ops->do_transaction_failed) {
1360         cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
1361                                            access_type, mmu_idx, attrs,
1362                                            response, retaddr);
1363     }
1364 }
1365 
1366 static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
1367                          int mmu_idx, vaddr addr, uintptr_t retaddr,
1368                          MMUAccessType access_type, MemOp op)
1369 {
1370     CPUState *cpu = env_cpu(env);
1371     hwaddr mr_offset;
1372     MemoryRegionSection *section;
1373     MemoryRegion *mr;
1374     uint64_t val;
1375     MemTxResult r;
1376 
1377     section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
1378     mr = section->mr;
1379     mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
1380     cpu->mem_io_pc = retaddr;
1381     if (!cpu->can_do_io) {
1382         cpu_io_recompile(cpu, retaddr);
1383     }
1384 
1385     {
1386         QEMU_IOTHREAD_LOCK_GUARD();
1387         r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
1388     }
1389 
1390     if (r != MEMTX_OK) {
1391         hwaddr physaddr = mr_offset +
1392             section->offset_within_address_space -
1393             section->offset_within_region;
1394 
1395         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
1396                                mmu_idx, full->attrs, r, retaddr);
1397     }
1398     return val;
1399 }
1400 
1401 /*
1402  * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
1403  * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match,
1404  * because io_writex may change the memory layout as a side effect.
1405  */
1406 static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
1407                             hwaddr mr_offset)
1408 {
1409 #ifdef CONFIG_PLUGIN
1410     SavedIOTLB *saved = &cs->saved_iotlb;
1411     saved->section = section;
1412     saved->mr_offset = mr_offset;
1413 #endif
1414 }
1415 
1416 static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
1417                       int mmu_idx, uint64_t val, vaddr addr,
1418                       uintptr_t retaddr, MemOp op)
1419 {
1420     CPUState *cpu = env_cpu(env);
1421     hwaddr mr_offset;
1422     MemoryRegionSection *section;
1423     MemoryRegion *mr;
1424     MemTxResult r;
1425 
1426     section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
1427     mr = section->mr;
1428     mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
1429     if (!cpu->can_do_io) {
1430         cpu_io_recompile(cpu, retaddr);
1431     }
1432     cpu->mem_io_pc = retaddr;
1433 
1434     /*
1435      * The memory_region_dispatch may trigger a flush/resize
1436      * so for plugins we save the iotlb_data just in case.
1437      */
1438     save_iotlb_data(cpu, section, mr_offset);
1439 
1440     {
1441         QEMU_IOTHREAD_LOCK_GUARD();
1442         r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
1443     }
1444 
1445     if (r != MEMTX_OK) {
1446         hwaddr physaddr = mr_offset +
1447             section->offset_within_address_space -
1448             section->offset_within_region;
1449 
1450         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
1451                                MMU_DATA_STORE, mmu_idx, full->attrs, r,
1452                                retaddr);
1453     }
1454 }
1455 
1456 /* Return true if ADDR is present in the victim tlb, and has been copied
1457    back to the main tlb.  */
1458 static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
1459                            MMUAccessType access_type, vaddr page)
1460 {
1461     size_t vidx;
1462 
1463     assert_cpu_is_self(env_cpu(env));
1464     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
1465         CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
1466         uint64_t cmp = tlb_read_idx(vtlb, access_type);
1467 
1468         if (cmp == page) {
1469             /* Found entry in victim tlb, swap tlb and iotlb.  */
1470             CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
1471 
1472             qemu_spin_lock(&env_tlb(env)->c.lock);
1473             copy_tlb_helper_locked(&tmptlb, tlb);
1474             copy_tlb_helper_locked(tlb, vtlb);
1475             copy_tlb_helper_locked(vtlb, &tmptlb);
1476             qemu_spin_unlock(&env_tlb(env)->c.lock);
1477 
1478             CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1479             CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
1480             CPUTLBEntryFull tmpf;
1481             tmpf = *f1; *f1 = *f2; *f2 = tmpf;
1482             return true;
1483         }
1484     }
1485     return false;
1486 }
1487 
1488 static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
1489                            CPUTLBEntryFull *full, uintptr_t retaddr)
1490 {
1491     ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
1492 
1493     trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1494 
1495     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1496         tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
1497     }
1498 
1499     /*
1500      * Set both VGA and migration bits for simplicity and to remove
1501      * the notdirty callback faster.
1502      */
1503     cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1504 
1505     /* We remove the notdirty callback only if the code has been flushed. */
1506     if (!cpu_physical_memory_is_clean(ram_addr)) {
1507         trace_memory_notdirty_set_dirty(mem_vaddr);
1508         tlb_set_dirty(cpu, mem_vaddr);
1509     }
1510 }
1511 
1512 static int probe_access_internal(CPUArchState *env, vaddr addr,
1513                                  int fault_size, MMUAccessType access_type,
1514                                  int mmu_idx, bool nonfault,
1515                                  void **phost, CPUTLBEntryFull **pfull,
1516                                  uintptr_t retaddr, bool check_mem_cbs)
1517 {
1518     uintptr_t index = tlb_index(env, mmu_idx, addr);
1519     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1520     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
1521     vaddr page_addr = addr & TARGET_PAGE_MASK;
1522     int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
1523     bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(env_cpu(env));
1524     CPUTLBEntryFull *full;
1525 
1526     if (!tlb_hit_page(tlb_addr, page_addr)) {
1527         if (!victim_tlb_hit(env, mmu_idx, index, access_type, page_addr)) {
1528             CPUState *cs = env_cpu(env);
1529 
1530             if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
1531                                            mmu_idx, nonfault, retaddr)) {
1532                 /* Non-faulting page table read failed.  */
1533                 *phost = NULL;
1534                 *pfull = NULL;
1535                 return TLB_INVALID_MASK;
1536             }
1537 
1538             /* TLB resize via tlb_fill may have moved the entry.  */
1539             index = tlb_index(env, mmu_idx, addr);
1540             entry = tlb_entry(env, mmu_idx, addr);
1541 
1542             /*
1543              * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
1544              * to force the next access through tlb_fill.  We've just
1545              * called tlb_fill, so we know that this entry *is* valid.
1546              */
1547             flags &= ~TLB_INVALID_MASK;
1548         }
1549         tlb_addr = tlb_read_idx(entry, access_type);
1550     }
1551     flags &= tlb_addr;
1552 
1553     *pfull = full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1554     flags |= full->slow_flags[access_type];
1555 
1556     /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
1557     if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))
1558         ||
1559         (access_type != MMU_INST_FETCH && force_mmio)) {
1560         *phost = NULL;
1561         return TLB_MMIO;
1562     }
1563 
1564     /* Everything else is RAM. */
1565     *phost = (void *)((uintptr_t)addr + entry->addend);
1566     return flags;
1567 }
1568 
1569 int probe_access_full(CPUArchState *env, vaddr addr, int size,
1570                       MMUAccessType access_type, int mmu_idx,
1571                       bool nonfault, void **phost, CPUTLBEntryFull **pfull,
1572                       uintptr_t retaddr)
1573 {
1574     int flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1575                                       nonfault, phost, pfull, retaddr, true);
1576 
1577     /* Handle clean RAM pages.  */
1578     if (unlikely(flags & TLB_NOTDIRTY)) {
1579         notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
1580         flags &= ~TLB_NOTDIRTY;
1581     }
1582 
1583     return flags;
1584 }
1585 
1586 int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
1587                           MMUAccessType access_type, int mmu_idx,
1588                           void **phost, CPUTLBEntryFull **pfull)
1589 {
1590     void *discard_phost;
1591     CPUTLBEntryFull *discard_tlb;
1592 
1593     /* privately handle users that don't need full results */
1594     phost = phost ? phost : &discard_phost;
1595     pfull = pfull ? pfull : &discard_tlb;
1596 
1597     int flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1598                                       true, phost, pfull, 0, false);
1599 
1600     /* Handle clean RAM pages.  */
1601     if (unlikely(flags & TLB_NOTDIRTY)) {
1602         notdirty_write(env_cpu(env), addr, 1, *pfull, 0);
1603         flags &= ~TLB_NOTDIRTY;
1604     }
1605 
1606     return flags;
1607 }
1608 
1609 int probe_access_flags(CPUArchState *env, vaddr addr, int size,
1610                        MMUAccessType access_type, int mmu_idx,
1611                        bool nonfault, void **phost, uintptr_t retaddr)
1612 {
1613     CPUTLBEntryFull *full;
1614     int flags;
1615 
1616     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1617 
1618     flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1619                                   nonfault, phost, &full, retaddr, true);
1620 
1621     /* Handle clean RAM pages. */
1622     if (unlikely(flags & TLB_NOTDIRTY)) {
1623         notdirty_write(env_cpu(env), addr, 1, full, retaddr);
1624         flags &= ~TLB_NOTDIRTY;
1625     }
1626 
1627     return flags;
1628 }
1629 
1630 void *probe_access(CPUArchState *env, vaddr addr, int size,
1631                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1632 {
1633     CPUTLBEntryFull *full;
1634     void *host;
1635     int flags;
1636 
1637     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1638 
1639     flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1640                                   false, &host, &full, retaddr, true);
1641 
1642     /* Per the interface, size == 0 merely faults the access. */
1643     if (size == 0) {
1644         return NULL;
1645     }
1646 
1647     if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
1648         /* Handle watchpoints.  */
1649         if (flags & TLB_WATCHPOINT) {
1650             int wp_access = (access_type == MMU_DATA_STORE
1651                              ? BP_MEM_WRITE : BP_MEM_READ);
1652             cpu_check_watchpoint(env_cpu(env), addr, size,
1653                                  full->attrs, wp_access, retaddr);
1654         }
1655 
1656         /* Handle clean RAM pages.  */
1657         if (flags & TLB_NOTDIRTY) {
1658             notdirty_write(env_cpu(env), addr, 1, full, retaddr);
1659         }
1660     }
1661 
1662     return host;
1663 }
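
/*
 * Illustrative use of probe_access from a target helper (hypothetical
 * code, not part of this file): probe the whole range once, then touch
 * guest RAM directly when a host pointer comes back.
 *
 *     void *host = probe_access(env, addr, len, MMU_DATA_STORE,
 *                               cpu_mmu_index(env, false), GETPC());
 *     if (host) {
 *         memset(host, 0, len);    // plain RAM; faults already handled
 *     }
 *
 * By the time probe_access returns, any fault has been raised, any
 * watchpoint hit reported, and clean-page (notdirty) tracking updated.
 */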
1664 
1665 void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1666                         MMUAccessType access_type, int mmu_idx)
1667 {
1668     CPUTLBEntryFull *full;
1669     void *host;
1670     int flags;
1671 
1672     flags = probe_access_internal(env, addr, 0, access_type,
1673                                   mmu_idx, true, &host, &full, 0, false);
1674 
1675     /* No combination of flags is expected by the caller. */
1676     return flags ? NULL : host;
1677 }
1678 
1679 /*
1680  * Return a ram_addr_t for the virtual address for execution.
1681  *
1682  * Return -1 if we can't translate and execute from an entire page
1683  * of RAM.  This will force us to execute by loading and translating
1684  * one insn at a time, without caching.
1685  *
1686  * NOTE: This function will trigger an exception if the page is
1687  * not executable.
1688  */
1689 tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
1690                                         void **hostp)
1691 {
1692     CPUTLBEntryFull *full;
1693     void *p;
1694 
1695     (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
1696                                 cpu_mmu_index(env, true), false,
1697                                 &p, &full, 0, false);
1698     if (p == NULL) {
1699         return -1;
1700     }
1701 
1702     if (full->lg_page_size < TARGET_PAGE_BITS) {
1703         return -1;
1704     }
1705 
1706     if (hostp) {
1707         *hostp = p;
1708     }
1709     return qemu_ram_addr_from_host_nofail(p);
1710 }
1711 
1712 /* Load/store with atomicity primitives. */
1713 #include "ldst_atomicity.c.inc"
1714 
1715 #ifdef CONFIG_PLUGIN
1716 /*
1717  * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1718  * This should be a hot path as we will have just looked this address up
1719  * in the softmmu lookup code (or helper). We don't handle re-fills or
1720  * checking the victim table. This is purely informational.
1721  *
1722  * This almost never fails as the memory access being instrumented
1723  * should have just filled the TLB. The one corner case is io_writex
1724  * which can cause TLB flushes and potential resizing of the TLBs
1725  * losing the information we need. In those cases we need to recover
1726  * data from a copy of the CPUTLBEntryFull. As long as this always occurs
1727  * from the same thread (which a mem callback will be) this is safe.
1728  */
1729 
1730 bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
1731                        bool is_store, struct qemu_plugin_hwaddr *data)
1732 {
1733     CPUArchState *env = cpu->env_ptr;
1734     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1735     uintptr_t index = tlb_index(env, mmu_idx, addr);
1736     uint64_t tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
1737 
1738     if (likely(tlb_hit(tlb_addr, addr))) {
1739         /* We must have an iotlb entry for MMIO */
1740         if (tlb_addr & TLB_MMIO) {
1741             CPUTLBEntryFull *full;
1742             full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1743             data->is_io = true;
1744             data->v.io.section =
1745                 iotlb_to_section(cpu, full->xlat_section, full->attrs);
1746             data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
1747         } else {
1748             data->is_io = false;
1749             data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1750         }
1751         return true;
1752     } else {
1753         SavedIOTLB *saved = &cpu->saved_iotlb;
1754         data->is_io = true;
1755         data->v.io.section = saved->section;
1756         data->v.io.offset = saved->mr_offset;
1757         return true;
1758     }
1759 }
1760 
1761 #endif
1762 
1763 /*
1764  * Probe for a load/store operation.
1765  * Return the host address into @haddr and the TLB flags into @flags.
1766  */
1767 
1768 typedef struct MMULookupPageData {
1769     CPUTLBEntryFull *full;
1770     void *haddr;
1771     vaddr addr;
1772     int flags;
1773     int size;
1774 } MMULookupPageData;
1775 
1776 typedef struct MMULookupLocals {
1777     MMULookupPageData page[2];
1778     MemOp memop;
1779     int mmu_idx;
1780 } MMULookupLocals;
1781 
1782 /**
1783  * mmu_lookup1: translate one page
1784  * @env: cpu context
1785  * @data: lookup parameters
1786  * @mmu_idx: virtual address context
1787  * @access_type: load/store/code
1788  * @ra: return address into tcg generated code, or 0
1789  *
1790  * Resolve the translation for the one page at @data.addr, filling in
1791  * the rest of @data with the results.  If the translation fails,
1792  * tlb_fill will longjmp out.  Return true if the softmmu tlb for
1793  * @mmu_idx may have resized.
1794  */
1795 static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
1796                         int mmu_idx, MMUAccessType access_type, uintptr_t ra)
1797 {
1798     vaddr addr = data->addr;
1799     uintptr_t index = tlb_index(env, mmu_idx, addr);
1800     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1801     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
1802     bool maybe_resized = false;
1803     CPUTLBEntryFull *full;
1804     int flags;
1805 
1806     /* If the TLB entry is for a different page, reload and try again.  */
1807     if (!tlb_hit(tlb_addr, addr)) {
1808         if (!victim_tlb_hit(env, mmu_idx, index, access_type,
1809                             addr & TARGET_PAGE_MASK)) {
1810             tlb_fill(env_cpu(env), addr, data->size, access_type, mmu_idx, ra);
1811             maybe_resized = true;
1812             index = tlb_index(env, mmu_idx, addr);
1813             entry = tlb_entry(env, mmu_idx, addr);
1814         }
1815         tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
1816     }
1817 
1818     full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
1819     flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
1820     flags |= full->slow_flags[access_type];
1821 
1822     data->full = full;
1823     data->flags = flags;
1824     /* Compute haddr speculatively; depending on flags it might be invalid. */
1825     data->haddr = (void *)((uintptr_t)addr + entry->addend);
1826 
1827     return maybe_resized;
1828 }
1829 
1830 /**
1831  * mmu_watch_or_dirty
1832  * @env: cpu context
1833  * @data: lookup parameters
1834  * @access_type: load/store/code
1835  * @ra: return address into tcg generated code, or 0
1836  *
1837  * Trigger watchpoints for @data.addr:@data.size;
1838  * record writes to protected clean pages.
1839  */
1840 static void mmu_watch_or_dirty(CPUArchState *env, MMULookupPageData *data,
1841                                MMUAccessType access_type, uintptr_t ra)
1842 {
1843     CPUTLBEntryFull *full = data->full;
1844     vaddr addr = data->addr;
1845     int flags = data->flags;
1846     int size = data->size;
1847 
1848     /* On watchpoint hit, this will longjmp out.  */
1849     if (flags & TLB_WATCHPOINT) {
1850         int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ;
1851         cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs, wp, ra);
1852         flags &= ~TLB_WATCHPOINT;
1853     }
1854 
1855     /* Note that notdirty is only set for writes. */
1856     if (flags & TLB_NOTDIRTY) {
1857         notdirty_write(env_cpu(env), addr, size, full, ra);
1858         flags &= ~TLB_NOTDIRTY;
1859     }
1860     data->flags = flags;
1861 }
1862 
1863 /**
1864  * mmu_lookup: translate page(s)
1865  * @env: cpu context
1866  * @addr: virtual address
1867  * @oi: combined mmu_idx and MemOp
1868  * @ra: return address into tcg generated code, or 0
1869  * @access_type: load/store/code
1870  * @l: output result
1871  *
1872  * Resolve the translation for the page(s) beginning at @addr, for MemOp.size
1873  * bytes.  Return true if the lookup crosses a page boundary.
1874  */
1875 static bool mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
1876                        uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
1877 {
1878     unsigned a_bits;
1879     bool crosspage;
1880     int flags;
1881 
1882     l->memop = get_memop(oi);
1883     l->mmu_idx = get_mmuidx(oi);
1884 
1885     tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
1886 
1887     /* Handle CPU specific unaligned behaviour */
1888     a_bits = get_alignment_bits(l->memop);
1889     if (addr & ((1 << a_bits) - 1)) {
1890         cpu_unaligned_access(env_cpu(env), addr, type, l->mmu_idx, ra);
1891     }
1892 
1893     l->page[0].addr = addr;
1894     l->page[0].size = memop_size(l->memop);
1895     l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
1896     l->page[1].size = 0;
1897     crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
1898 
1899     if (likely(!crosspage)) {
1900         mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra);
1901 
1902         flags = l->page[0].flags;
1903         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1904             mmu_watch_or_dirty(env, &l->page[0], type, ra);
1905         }
1906         if (unlikely(flags & TLB_BSWAP)) {
1907             l->memop ^= MO_BSWAP;
1908         }
1909     } else {
1910         /* Finish computing the page-crossing split. */
1911         int size0 = l->page[1].addr - addr;
1912         l->page[1].size = l->page[0].size - size0;
1913         l->page[0].size = size0;
1914 
1915         /*
1916          * Lookup both pages, recognizing exceptions from either.  If the
1917          * second lookup potentially resized, refresh first CPUTLBEntryFull.
1918          */
1919         mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra);
1920         if (mmu_lookup1(env, &l->page[1], l->mmu_idx, type, ra)) {
1921             uintptr_t index = tlb_index(env, l->mmu_idx, addr);
1922             l->page[0].full = &env_tlb(env)->d[l->mmu_idx].fulltlb[index];
1923         }
1924 
1925         flags = l->page[0].flags | l->page[1].flags;
1926         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1927             mmu_watch_or_dirty(env, &l->page[0], type, ra);
1928             mmu_watch_or_dirty(env, &l->page[1], type, ra);
1929         }
1930 
1931         /*
1932          * Since target/sparc is the only user of TLB_BSWAP, and all
1933          * Sparc accesses are aligned, any treatment across two pages
1934          * would be arbitrary.  Refuse it until there's a use.
1935          */
1936         tcg_debug_assert((flags & TLB_BSWAP) == 0);
1937     }
1938 
1939     return crosspage;
1940 }
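
/*
 * Worked example (illustrative): an 8-byte access starting 3 bytes before
 * a page boundary returns crosspage == true with page[0].size == 3 and
 * page[1].size == 5, and page[1].addr set to the start of the next page.
 */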
1941 
1942 /*
1943  * Probe for an atomic operation.  Do not allow unaligned operations,
1944  * or io operations to proceed.  Return the host address.
1945  */
1946 static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
1947                                int size, uintptr_t retaddr)
1948 {
1949     uintptr_t mmu_idx = get_mmuidx(oi);
1950     MemOp mop = get_memop(oi);
1951     int a_bits = get_alignment_bits(mop);
1952     uintptr_t index;
1953     CPUTLBEntry *tlbe;
1954     vaddr tlb_addr;
1955     void *hostaddr;
1956     CPUTLBEntryFull *full;
1957 
1958     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1959 
1960     /* Adjust the given return address.  */
1961     retaddr -= GETPC_ADJ;
1962 
1963     /* Enforce guest required alignment.  */
1964     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1965         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1966         cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1967                              mmu_idx, retaddr);
1968     }
1969 
1970     /* Enforce qemu required alignment.  */
1971     if (unlikely(addr & (size - 1))) {
1972         /* We get here if guest alignment was not requested,
1973            or was not enforced by cpu_unaligned_access above.
1974            We might widen the access and emulate, but for now
1975            mark an exception and exit the cpu loop.  */
1976         goto stop_the_world;
1977     }
1978 
1979     index = tlb_index(env, mmu_idx, addr);
1980     tlbe = tlb_entry(env, mmu_idx, addr);
1981 
1982     /* Check TLB entry and enforce page permissions.  */
1983     tlb_addr = tlb_addr_write(tlbe);
1984     if (!tlb_hit(tlb_addr, addr)) {
1985         if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_STORE,
1986                             addr & TARGET_PAGE_MASK)) {
1987             tlb_fill(env_cpu(env), addr, size,
1988                      MMU_DATA_STORE, mmu_idx, retaddr);
1989             index = tlb_index(env, mmu_idx, addr);
1990             tlbe = tlb_entry(env, mmu_idx, addr);
1991         }
1992         tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1993     }
1994 
1995     /*
1996      * Let the guest notice RMW on a write-only page.
1997      * We have just verified that the page is writable.
1998      * Subpage lookups may have left TLB_INVALID_MASK set,
1999      * but addr_read will only be -1 if PAGE_READ was unset.
2000      */
2001     if (unlikely(tlbe->addr_read == -1)) {
2002         tlb_fill(env_cpu(env), addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
2003         /*
2004          * Since we don't support reads and writes to different
2005          * addresses, and we do have the proper page loaded for
2006          * write, this shouldn't ever return.  But just in case,
2007          * handle via stop-the-world.
2008          */
2009         goto stop_the_world;
2010     }
2011     /* Collect tlb flags for read. */
2012     tlb_addr |= tlbe->addr_read;
2013 
2014     /* Notice an IO access or a needs-MMU-lookup access */
2015     if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
2016         /* There's really nothing that can be done to
2017            support this apart from stop-the-world.  */
2018         goto stop_the_world;
2019     }
2020 
2021     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
2022     full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
2023 
2024     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
2025         notdirty_write(env_cpu(env), addr, size, full, retaddr);
2026     }
2027 
2028     if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
2029         int wp_flags = 0;
2030 
2031         if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
2032             wp_flags |= BP_MEM_WRITE;
2033         }
2034         if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
2035             wp_flags |= BP_MEM_READ;
2036         }
2037         if (wp_flags) {
2038             cpu_check_watchpoint(env_cpu(env), addr, size,
2039                                  full->attrs, wp_flags, retaddr);
2040         }
2041     }
2042 
2043     return hostaddr;
2044 
2045  stop_the_world:
2046     cpu_loop_exit_atomic(env_cpu(env), retaddr);
2047 }
2048 
2049 /*
2050  * Load Helpers
2051  *
2052  * We support two different access types. SOFTMMU_CODE_ACCESS is
2053  * specifically for reading instructions from system memory. It is
2054  * called by the translation loop and in some helpers where the code
2055  * is disassembled. It shouldn't be called directly by guest code.
2056  *
2057  * For the benefit of TCG generated code, we want to avoid the
2058  * complication of ABI-specific return type promotion and always
2059  * return a value extended to the register size of the host. This is
2060  * tcg_target_long, except in the case of a 32-bit host and 64-bit
2061  * data, and for that we always have uint64_t.
2062  *
2063  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
2064  */
2065 
2066 /**
2067  * do_ld_mmio_beN:
2068  * @env: cpu context
2069  * @p: translation parameters
2070  * @ret_be: accumulated data
2071  * @mmu_idx: virtual address context
2072  * @ra: return address into tcg generated code, or 0
2073  *
2074  * Load @p->size bytes from @p->addr, which is memory-mapped i/o.
2075  * The bytes are concatenated in big-endian order with @ret_be.
2076  */
2077 static uint64_t do_ld_mmio_beN(CPUArchState *env, MMULookupPageData *p,
2078                                uint64_t ret_be, int mmu_idx,
2079                                MMUAccessType type, uintptr_t ra)
2080 {
2081     CPUTLBEntryFull *full = p->full;
2082     vaddr addr = p->addr;
2083     int i, size = p->size;
2084 
2085     QEMU_IOTHREAD_LOCK_GUARD();
2086     for (i = 0; i < size; i++) {
2087         uint8_t x = io_readx(env, full, mmu_idx, addr + i, ra, type, MO_UB);
2088         ret_be = (ret_be << 8) | x;
2089     }
2090     return ret_be;
2091 }
2092 
2093 /**
2094  * do_ld_bytes_beN
2095  * @p: translation parameters
2096  * @ret_be: accumulated data
2097  *
2098  * Load @p->size bytes from @p->haddr, which is RAM.
2099  * The bytes are concatenated in big-endian order with @ret_be.
2100  */
2101 static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be)
2102 {
2103     uint8_t *haddr = p->haddr;
2104     int i, size = p->size;
2105 
2106     for (i = 0; i < size; i++) {
2107         ret_be = (ret_be << 8) | haddr[i];
2108     }
2109     return ret_be;
2110 }
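
/*
 * Example (illustrative): with ret_be == 0xab and bytes { 0x12, 0x34 } at
 * p->haddr, the result is 0xab1234 -- earlier bytes land in the more
 * significant positions, i.e. big-endian concatenation.
 */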
2111 
2112 /**
2113  * do_ld_parts_beN
2114  * @p: translation parameters
2115  * @ret_be: accumulated data
2116  *
2117  * As do_ld_bytes_beN, but atomically on each aligned part.
2118  */
2119 static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be)
2120 {
2121     void *haddr = p->haddr;
2122     int size = p->size;
2123 
2124     do {
2125         uint64_t x;
2126         int n;
2127 
2128         /*
2129          * Find minimum of alignment and size.
2130          * This is slightly stronger than required by MO_ATOM_SUBALIGN, which
2131          * would have only checked the low bits of addr|size once at the start,
2132          * but is just as easy.
2133          */
2134         switch (((uintptr_t)haddr | size) & 7) {
2135         case 4:
2136             x = cpu_to_be32(load_atomic4(haddr));
2137             ret_be = (ret_be << 32) | x;
2138             n = 4;
2139             break;
2140         case 2:
2141         case 6:
2142             x = cpu_to_be16(load_atomic2(haddr));
2143             ret_be = (ret_be << 16) | x;
2144             n = 2;
2145             break;
2146         default:
2147             x = *(uint8_t *)haddr;
2148             ret_be = (ret_be << 8) | x;
2149             n = 1;
2150             break;
2151         case 0:
2152             g_assert_not_reached();
2153         }
2154         haddr += n;
2155         size -= n;
2156     } while (size != 0);
2157     return ret_be;
2158 }
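
/*
 * Example trace (illustrative): for a haddr with (haddr & 7) == 2 and
 * size == 6, the first iteration sees (2 | 6) & 7 == 6 and performs an
 * atomic 2-byte load; the second sees (4 | 4) & 7 == 4 and performs an
 * atomic 4-byte load, consuming the remaining bytes.
 */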
2159 
2160 /**
2161  * do_ld_whole_be4
2162  * @p: translation parameters
2163  * @ret_be: accumulated data
2164  *
2165  * As do_ld_bytes_beN, but with one atomic load.
2166  * Four aligned bytes are guaranteed to cover the load.
2167  */
2168 static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be)
2169 {
2170     int o = p->addr & 3;
2171     uint32_t x = load_atomic4(p->haddr - o);
2172 
2173     x = cpu_to_be32(x);
2174     x <<= o * 8;
2175     x >>= (4 - p->size) * 8;
2176     return (ret_be << (p->size * 8)) | x;
2177 }
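
/*
 * Example (illustrative): with (p->addr & 3) == 1 and p->size == 2, the
 * aligned 4-byte load fetches bytes b0..b3; shifting left by 8 discards
 * b0 and shifting right by 16 leaves b1:b2 in the low 16 bits of x.
 */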
2178 
2179 /**
2180  * do_ld_whole_be8
2181  * @p: translation parameters
2182  * @ret_be: accumulated data
2183  *
2184  * As do_ld_bytes_beN, but with one atomic load.
2185  * Eight aligned bytes are guaranteed to cover the load.
2186  */
2187 static uint64_t do_ld_whole_be8(CPUArchState *env, uintptr_t ra,
2188                                 MMULookupPageData *p, uint64_t ret_be)
2189 {
2190     int o = p->addr & 7;
2191     uint64_t x = load_atomic8_or_exit(env, ra, p->haddr - o);
2192 
2193     x = cpu_to_be64(x);
2194     x <<= o * 8;
2195     x >>= (8 - p->size) * 8;
2196     return (ret_be << (p->size * 8)) | x;
2197 }
2198 
2199 /**
2200  * do_ld_whole_be16
2201  * @p: translation parameters
2202  * @ret_be: accumulated data
2203  *
2204  * As do_ld_bytes_beN, but with one atomic load.
2205  * 16 aligned bytes are guaranteed to cover the load.
2206  */
2207 static Int128 do_ld_whole_be16(CPUArchState *env, uintptr_t ra,
2208                                MMULookupPageData *p, uint64_t ret_be)
2209 {
2210     int o = p->addr & 15;
2211     Int128 x, y = load_atomic16_or_exit(env, ra, p->haddr - o);
2212     int size = p->size;
2213 
2214     if (!HOST_BIG_ENDIAN) {
2215         y = bswap128(y);
2216     }
2217     y = int128_lshift(y, o * 8);
2218     y = int128_urshift(y, (16 - size) * 8);
2219     x = int128_make64(ret_be);
2220     x = int128_lshift(x, size * 8);
2221     return int128_or(x, y);
2222 }
2223 
2224 /*
2225  * Wrapper for the above.
2226  */
2227 static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p,
2228                           uint64_t ret_be, int mmu_idx, MMUAccessType type,
2229                           MemOp mop, uintptr_t ra)
2230 {
2231     MemOp atom;
2232     unsigned tmp, half_size;
2233 
2234     if (unlikely(p->flags & TLB_MMIO)) {
2235         return do_ld_mmio_beN(env, p, ret_be, mmu_idx, type, ra);
2236     }
2237 
2238     /*
2239      * It is a given that we cross a page and therefore there is no
2240      * atomicity for the load as a whole, but subobjects may need attention.
2241      */
2242     atom = mop & MO_ATOM_MASK;
2243     switch (atom) {
2244     case MO_ATOM_SUBALIGN:
2245         return do_ld_parts_beN(p, ret_be);
2246 
2247     case MO_ATOM_IFALIGN_PAIR:
2248     case MO_ATOM_WITHIN16_PAIR:
2249         tmp = mop & MO_SIZE;
2250         tmp = tmp ? tmp - 1 : 0;
2251         half_size = 1 << tmp;
2252         if (atom == MO_ATOM_IFALIGN_PAIR
2253             ? p->size == half_size
2254             : p->size >= half_size) {
2255             if (!HAVE_al8_fast && p->size < 4) {
2256                 return do_ld_whole_be4(p, ret_be);
2257             } else {
2258                 return do_ld_whole_be8(env, ra, p, ret_be);
2259             }
2260         }
2261         /* fall through */
2262 
2263     case MO_ATOM_IFALIGN:
2264     case MO_ATOM_WITHIN16:
2265     case MO_ATOM_NONE:
2266         return do_ld_bytes_beN(p, ret_be);
2267 
2268     default:
2269         g_assert_not_reached();
2270     }
2271 }
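
/*
 * Example (illustrative): an 8-byte MO_ATOM_IFALIGN_PAIR load split 4/4
 * across the page boundary has half_size == 4, so each 4-byte half takes
 * the do_ld_whole_be4/be8 path above rather than the byte loop.
 */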
2272 
2273 /*
2274  * Wrapper for the above, for 8 < size < 16.
2275  */
2276 static Int128 do_ld16_beN(CPUArchState *env, MMULookupPageData *p,
2277                           uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra)
2278 {
2279     int size = p->size;
2280     uint64_t b;
2281     MemOp atom;
2282 
2283     if (unlikely(p->flags & TLB_MMIO)) {
2284         p->size = size - 8;
2285         a = do_ld_mmio_beN(env, p, a, mmu_idx, MMU_DATA_LOAD, ra);
2286         p->addr += p->size;
2287         p->size = 8;
2288         b = do_ld_mmio_beN(env, p, 0, mmu_idx, MMU_DATA_LOAD, ra);
2289         return int128_make128(b, a);
2290     }
2291 
2292     /*
2293      * It is a given that we cross a page and therefore there is no
2294      * atomicity for the load as a whole, but subobjects may need attention.
2295      */
2296     atom = mop & MO_ATOM_MASK;
2297     switch (atom) {
2298     case MO_ATOM_SUBALIGN:
2299         p->size = size - 8;
2300         a = do_ld_parts_beN(p, a);
2301         p->haddr += size - 8;
2302         p->size = 8;
2303         b = do_ld_parts_beN(p, 0);
2304         break;
2305 
2306     case MO_ATOM_WITHIN16_PAIR:
2307         /* Since size > 8, this is the half that must be atomic. */
2308         return do_ld_whole_be16(env, ra, p, a);
2309 
2310     case MO_ATOM_IFALIGN_PAIR:
2311         /*
2312          * Since size > 8, both halves are misaligned,
2313          * and so neither is atomic.
2314          */
2315     case MO_ATOM_IFALIGN:
2316     case MO_ATOM_WITHIN16:
2317     case MO_ATOM_NONE:
2318         p->size = size - 8;
2319         a = do_ld_bytes_beN(p, a);
2320         b = ldq_be_p(p->haddr + size - 8);
2321         break;
2322 
2323     default:
2324         g_assert_not_reached();
2325     }
2326 
2327     return int128_make128(b, a);
2328 }
2329 
2330 static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
2331                        MMUAccessType type, uintptr_t ra)
2332 {
2333     if (unlikely(p->flags & TLB_MMIO)) {
2334         return io_readx(env, p->full, mmu_idx, p->addr, ra, type, MO_UB);
2335     } else {
2336         return *(uint8_t *)p->haddr;
2337     }
2338 }
2339 
2340 static uint16_t do_ld_2(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
2341                         MMUAccessType type, MemOp memop, uintptr_t ra)
2342 {
2343     uint64_t ret;
2344 
2345     if (unlikely(p->flags & TLB_MMIO)) {
2346         return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop);
2347     }
2348 
2349     /* Perform the load host endian, then swap if necessary. */
2350     ret = load_atom_2(env, ra, p->haddr, memop);
2351     if (memop & MO_BSWAP) {
2352         ret = bswap16(ret);
2353     }
2354     return ret;
2355 }
2356 
2357 static uint32_t do_ld_4(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
2358                         MMUAccessType type, MemOp memop, uintptr_t ra)
2359 {
2360     uint32_t ret;
2361 
2362     if (unlikely(p->flags & TLB_MMIO)) {
2363         return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop);
2364     }
2365 
2366     /* Perform the load host endian. */
2367     ret = load_atom_4(env, ra, p->haddr, memop);
2368     if (memop & MO_BSWAP) {
2369         ret = bswap32(ret);
2370     }
2371     return ret;
2372 }
2373 
2374 static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
2375                         MMUAccessType type, MemOp memop, uintptr_t ra)
2376 {
2377     uint64_t ret;
2378 
2379     if (unlikely(p->flags & TLB_MMIO)) {
2380         return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop);
2381     }
2382 
2383     /* Perform the load host endian. */
2384     ret = load_atom_8(env, ra, p->haddr, memop);
2385     if (memop & MO_BSWAP) {
2386         ret = bswap64(ret);
2387     }
2388     return ret;
2389 }
2390 
2391 static uint8_t do_ld1_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
2392                           uintptr_t ra, MMUAccessType access_type)
2393 {
2394     MMULookupLocals l;
2395     bool crosspage;
2396 
2397     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2398     crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
2399     tcg_debug_assert(!crosspage);
2400 
2401     return do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra);
2402 }
2403 
2404 tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
2405                                  MemOpIdx oi, uintptr_t retaddr)
2406 {
2407     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
2408     return do_ld1_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
2409 }
2410 
2411 static uint16_t do_ld2_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
2412                            uintptr_t ra, MMUAccessType access_type)
2413 {
2414     MMULookupLocals l;
2415     bool crosspage;
2416     uint16_t ret;
2417     uint8_t a, b;
2418 
2419     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2420     crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
2421     if (likely(!crosspage)) {
2422         return do_ld_2(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2423     }
2424 
2425     a = do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra);
2426     b = do_ld_1(env, &l.page[1], l.mmu_idx, access_type, ra);
2427 
2428     if ((l.memop & MO_BSWAP) == MO_LE) {
2429         ret = a | (b << 8);
2430     } else {
2431         ret = b | (a << 8);
2432     }
2433     return ret;
2434 }
2435 
2436 tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
2437                                  MemOpIdx oi, uintptr_t retaddr)
2438 {
2439     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
2440     return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
2441 }
2442 
2443 static uint32_t do_ld4_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
2444                            uintptr_t ra, MMUAccessType access_type)
2445 {
2446     MMULookupLocals l;
2447     bool crosspage;
2448     uint32_t ret;
2449 
2450     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2451     crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
2452     if (likely(!crosspage)) {
2453         return do_ld_4(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2454     }
2455 
2456     ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2457     ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
2458     if ((l.memop & MO_BSWAP) == MO_LE) {
2459         ret = bswap32(ret);
2460     }
2461     return ret;
2462 }
2463 
2464 tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
2465                                  MemOpIdx oi, uintptr_t retaddr)
2466 {
2467     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
2468     return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
2469 }
2470 
2471 static uint64_t do_ld8_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
2472                            uintptr_t ra, MMUAccessType access_type)
2473 {
2474     MMULookupLocals l;
2475     bool crosspage;
2476     uint64_t ret;
2477 
2478     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2479     crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
2480     if (likely(!crosspage)) {
2481         return do_ld_8(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
2482     }
2483 
2484     ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
2485     ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
2486     if ((l.memop & MO_BSWAP) == MO_LE) {
2487         ret = bswap64(ret);
2488     }
2489     return ret;
2490 }
2491 
2492 uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
2493                         MemOpIdx oi, uintptr_t retaddr)
2494 {
2495     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
2496     return do_ld8_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
2497 }
2498 
2499 /*
2500  * Provide signed versions of the load routines as well.  We can of course
2501  * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
2502  */
2503 
2504 tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
2505                                  MemOpIdx oi, uintptr_t retaddr)
2506 {
2507     return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
2508 }
2509 
2510 tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
2511                                  MemOpIdx oi, uintptr_t retaddr)
2512 {
2513     return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
2514 }
2515 
2516 tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
2517                                  MemOpIdx oi, uintptr_t retaddr)
2518 {
2519     return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
2520 }
2521 
2522 static Int128 do_ld16_mmu(CPUArchState *env, vaddr addr,
2523                           MemOpIdx oi, uintptr_t ra)
2524 {
2525     MMULookupLocals l;
2526     bool crosspage;
2527     uint64_t a, b;
2528     Int128 ret;
2529     int first;
2530 
2531     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
2532     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l);
2533     if (likely(!crosspage)) {
2534         /* Perform the load host endian. */
2535         if (unlikely(l.page[0].flags & TLB_MMIO)) {
2536             QEMU_IOTHREAD_LOCK_GUARD();
2537             a = io_readx(env, l.page[0].full, l.mmu_idx, addr,
2538                          ra, MMU_DATA_LOAD, MO_64);
2539             b = io_readx(env, l.page[0].full, l.mmu_idx, addr + 8,
2540                          ra, MMU_DATA_LOAD, MO_64);
2541             ret = int128_make128(HOST_BIG_ENDIAN ? b : a,
2542                                  HOST_BIG_ENDIAN ? a : b);
2543         } else {
2544             ret = load_atom_16(env, ra, l.page[0].haddr, l.memop);
2545         }
2546         if (l.memop & MO_BSWAP) {
2547             ret = bswap128(ret);
2548         }
2549         return ret;
2550     }
2551 
2552     first = l.page[0].size;
2553     if (first == 8) {
2554         MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64;
2555 
2556         a = do_ld_8(env, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
2557         b = do_ld_8(env, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
2558         if ((mop8 & MO_BSWAP) == MO_LE) {
2559             ret = int128_make128(a, b);
2560         } else {
2561             ret = int128_make128(b, a);
2562         }
2563         return ret;
2564     }
2565 
2566     if (first < 8) {
2567         a = do_ld_beN(env, &l.page[0], 0, l.mmu_idx,
2568                       MMU_DATA_LOAD, l.memop, ra);
2569         ret = do_ld16_beN(env, &l.page[1], a, l.mmu_idx, l.memop, ra);
2570     } else {
2571         ret = do_ld16_beN(env, &l.page[0], 0, l.mmu_idx, l.memop, ra);
2572         b = int128_getlo(ret);
2573         ret = int128_lshift(ret, l.page[1].size * 8);
2574         a = int128_gethi(ret);
2575         b = do_ld_beN(env, &l.page[1], b, l.mmu_idx,
2576                       MMU_DATA_LOAD, l.memop, ra);
2577         ret = int128_make128(b, a);
2578     }
2579     if ((l.memop & MO_BSWAP) == MO_LE) {
2580         ret = bswap128(ret);
2581     }
2582     return ret;
2583 }
2584 
2585 Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
2586                        uint32_t oi, uintptr_t retaddr)
2587 {
2588     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
2589     return do_ld16_mmu(env, addr, oi, retaddr);
2590 }
2591 
2592 Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi)
2593 {
2594     return helper_ld16_mmu(env, addr, oi, GETPC());
2595 }
2596 
2597 /*
2598  * Load helpers for cpu_ldst.h.
2599  */
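
/*
 * Illustrative call from a target helper (hypothetical, not part of this
 * file): build a MemOpIdx and use the cpu_*_mmu wrappers so that plugin
 * callbacks are issued for the access.
 *
 *     MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, false));
 *     uint32_t val = cpu_ldl_mmu(env, addr, oi, GETPC());
 */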
2600 
2601 static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
2602 {
2603     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
2604 }
2605 
2606 uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
2607 {
2608     uint8_t ret;
2609 
2610     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
2611     ret = do_ld1_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
2612     plugin_load_cb(env, addr, oi);
2613     return ret;
2614 }
2615 
2616 uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
2617                      MemOpIdx oi, uintptr_t ra)
2618 {
2619     uint16_t ret;
2620 
2621     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
2622     ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
2623     plugin_load_cb(env, addr, oi);
2624     return ret;
2625 }
2626 
2627 uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
2628                      MemOpIdx oi, uintptr_t ra)
2629 {
2630     uint32_t ret;
2631 
2632     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
2633     ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
2634     plugin_load_cb(env, addr, oi);
2635     return ret;
2636 }
2637 
2638 uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
2639                      MemOpIdx oi, uintptr_t ra)
2640 {
2641     uint64_t ret;
2642 
2643     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
2644     ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
2645     plugin_load_cb(env, addr, oi);
2646     return ret;
2647 }
2648 
2649 Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
2650                     MemOpIdx oi, uintptr_t ra)
2651 {
2652     Int128 ret;
2653 
2654     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
2655     ret = do_ld16_mmu(env, addr, oi, ra);
2656     plugin_load_cb(env, addr, oi);
2657     return ret;
2658 }
2659 
2660 /*
2661  * Store Helpers
2662  */
2663 
2664 /**
2665  * do_st_mmio_leN:
2666  * @env: cpu context
2667  * @p: translation parameters
2668  * @val_le: data to store
2669  * @mmu_idx: virtual address context
2670  * @ra: return address into tcg generated code, or 0
2671  *
2672  * Store @p->size bytes at @p->addr, which is memory-mapped i/o.
2673  * The bytes to store are extracted in little-endian order from @val_le;
2674  * return the bytes of @val_le beyond @p->size that have not been stored.
2675  */
2676 static uint64_t do_st_mmio_leN(CPUArchState *env, MMULookupPageData *p,
2677                                uint64_t val_le, int mmu_idx, uintptr_t ra)
2678 {
2679     CPUTLBEntryFull *full = p->full;
2680     vaddr addr = p->addr;
2681     int i, size = p->size;
2682 
2683     QEMU_IOTHREAD_LOCK_GUARD();
2684     for (i = 0; i < size; i++, val_le >>= 8) {
2685         io_writex(env, full, mmu_idx, val_le, addr + i, ra, MO_UB);
2686     }
2687     return val_le;
2688 }
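
/*
 * Example (illustrative): storing val_le == 0x1122334455667788 with
 * p->size == 3 writes the bytes 0x88, 0x77, 0x66 to successive guest
 * addresses and returns 0x1122334455, the bytes not yet stored.
 */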
2689 
2690 /*
2691  * Wrapper for the above.
2692  */
2693 static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p,
2694                           uint64_t val_le, int mmu_idx,
2695                           MemOp mop, uintptr_t ra)
2696 {
2697     MemOp atom;
2698     unsigned tmp, half_size;
2699 
2700     if (unlikely(p->flags & TLB_MMIO)) {
2701         return do_st_mmio_leN(env, p, val_le, mmu_idx, ra);
2702     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2703         return val_le >> (p->size * 8);
2704     }
2705 
2706     /*
2707      * It is a given that we cross a page and therefore there is no atomicity
2708      * for the store as a whole, but subobjects may need attention.
2709      */
2710     atom = mop & MO_ATOM_MASK;
2711     switch (atom) {
2712     case MO_ATOM_SUBALIGN:
2713         return store_parts_leN(p->haddr, p->size, val_le);
2714 
2715     case MO_ATOM_IFALIGN_PAIR:
2716     case MO_ATOM_WITHIN16_PAIR:
2717         tmp = mop & MO_SIZE;
2718         tmp = tmp ? tmp - 1 : 0;
2719         half_size = 1 << tmp;
2720         if (atom == MO_ATOM_IFALIGN_PAIR
2721             ? p->size == half_size
2722             : p->size >= half_size) {
2723             if (!HAVE_al8_fast && p->size <= 4) {
2724                 return store_whole_le4(p->haddr, p->size, val_le);
2725             } else if (HAVE_al8) {
2726                 return store_whole_le8(p->haddr, p->size, val_le);
2727             } else {
2728                 cpu_loop_exit_atomic(env_cpu(env), ra);
2729             }
2730         }
2731         /* fall through */
2732 
2733     case MO_ATOM_IFALIGN:
2734     case MO_ATOM_WITHIN16:
2735     case MO_ATOM_NONE:
2736         return store_bytes_leN(p->haddr, p->size, val_le);
2737 
2738     default:
2739         g_assert_not_reached();
2740     }
2741 }
2742 
2743 /*
2744  * Wrapper for the above, for 8 < size < 16.
2745  */
2746 static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p,
2747                             Int128 val_le, int mmu_idx,
2748                             MemOp mop, uintptr_t ra)
2749 {
2750     int size = p->size;
2751     MemOp atom;
2752 
2753     if (unlikely(p->flags & TLB_MMIO)) {
2754         p->size = 8;
2755         do_st_mmio_leN(env, p, int128_getlo(val_le), mmu_idx, ra);
2756         p->size = size - 8;
2757         p->addr += 8;
2758         return do_st_mmio_leN(env, p, int128_gethi(val_le), mmu_idx, ra);
2759     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2760         return int128_gethi(val_le) >> ((size - 8) * 8);
2761     }
2762 
2763     /*
2764      * It is a given that we cross a page and therefore there is no atomicity
2765      * for the store as a whole, but subobjects may need attention.
2766      */
2767     atom = mop & MO_ATOM_MASK;
2768     switch (atom) {
2769     case MO_ATOM_SUBALIGN:
2770         store_parts_leN(p->haddr, 8, int128_getlo(val_le));
2771         return store_parts_leN(p->haddr + 8, p->size - 8,
2772                                int128_gethi(val_le));
2773 
2774     case MO_ATOM_WITHIN16_PAIR:
2775         /* Since size > 8, this is the half that must be atomic. */
2776         if (!HAVE_ATOMIC128_RW) {
2777             cpu_loop_exit_atomic(env_cpu(env), ra);
2778         }
2779         return store_whole_le16(p->haddr, p->size, val_le);
2780 
2781     case MO_ATOM_IFALIGN_PAIR:
2782         /*
2783          * Since size > 8, both halves are misaligned,
2784          * and so neither is atomic.
2785          */
2786     case MO_ATOM_IFALIGN:
2787     case MO_ATOM_WITHIN16:
2788     case MO_ATOM_NONE:
2789         stq_le_p(p->haddr, int128_getlo(val_le));
2790         return store_bytes_leN(p->haddr + 8, p->size - 8,
2791                                int128_gethi(val_le));
2792 
2793     default:
2794         g_assert_not_reached();
2795     }
2796 }
2797 
2798 static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val,
2799                     int mmu_idx, uintptr_t ra)
2800 {
2801     if (unlikely(p->flags & TLB_MMIO)) {
2802         io_writex(env, p->full, mmu_idx, val, p->addr, ra, MO_UB);
2803     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2804         /* nothing */
2805     } else {
2806         *(uint8_t *)p->haddr = val;
2807     }
2808 }
2809 
2810 static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val,
2811                     int mmu_idx, MemOp memop, uintptr_t ra)
2812 {
2813     if (unlikely(p->flags & TLB_MMIO)) {
2814         io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop);
2815     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2816         /* nothing */
2817     } else {
2818         /* Swap to host endian if necessary, then store. */
2819         if (memop & MO_BSWAP) {
2820             val = bswap16(val);
2821         }
2822         store_atom_2(env, ra, p->haddr, memop, val);
2823     }
2824 }
2825 
2826 static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val,
2827                     int mmu_idx, MemOp memop, uintptr_t ra)
2828 {
2829     if (unlikely(p->flags & TLB_MMIO)) {
2830         io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop);
2831     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2832         /* nothing */
2833     } else {
2834         /* Swap to host endian if necessary, then store. */
2835         if (memop & MO_BSWAP) {
2836             val = bswap32(val);
2837         }
2838         store_atom_4(env, ra, p->haddr, memop, val);
2839     }
2840 }
2841 
2842 static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val,
2843                     int mmu_idx, MemOp memop, uintptr_t ra)
2844 {
2845     if (unlikely(p->flags & TLB_MMIO)) {
2846         io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop);
2847     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
2848         /* nothing */
2849     } else {
2850         /* Swap to host endian if necessary, then store. */
2851         if (memop & MO_BSWAP) {
2852             val = bswap64(val);
2853         }
2854         store_atom_8(env, ra, p->haddr, memop, val);
2855     }
2856 }
2857 
2858 void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
2859                     MemOpIdx oi, uintptr_t ra)
2860 {
2861     MMULookupLocals l;
2862     bool crosspage;
2863 
2864     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
2865     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2866     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
2867     tcg_debug_assert(!crosspage);
2868 
2869     do_st_1(env, &l.page[0], val, l.mmu_idx, ra);
2870 }
2871 
2872 static void do_st2_mmu(CPUArchState *env, vaddr addr, uint16_t val,
2873                        MemOpIdx oi, uintptr_t ra)
2874 {
2875     MMULookupLocals l;
2876     bool crosspage;
2877     uint8_t a, b;
2878 
2879     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2880     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
2881     if (likely(!crosspage)) {
2882         do_st_2(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
2883         return;
2884     }
2885 
2886     if ((l.memop & MO_BSWAP) == MO_LE) {
2887         a = val, b = val >> 8;
2888     } else {
2889         b = val, a = val >> 8;
2890     }
2891     do_st_1(env, &l.page[0], a, l.mmu_idx, ra);
2892     do_st_1(env, &l.page[1], b, l.mmu_idx, ra);
2893 }
2894 
2895 void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
2896                     MemOpIdx oi, uintptr_t retaddr)
2897 {
2898     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
2899     do_st2_mmu(env, addr, val, oi, retaddr);
2900 }
2901 
2902 static void do_st4_mmu(CPUArchState *env, vaddr addr, uint32_t val,
2903                        MemOpIdx oi, uintptr_t ra)
2904 {
2905     MMULookupLocals l;
2906     bool crosspage;
2907 
2908     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2909     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
2910     if (likely(!crosspage)) {
2911         do_st_4(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
2912         return;
2913     }
2914 
2915     /* Swap to little endian for simplicity, then store by bytes. */
2916     if ((l.memop & MO_BSWAP) != MO_LE) {
2917         val = bswap32(val);
2918     }
2919     val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
2920     (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
2921 }
2922 
2923 void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
2924                     MemOpIdx oi, uintptr_t retaddr)
2925 {
2926     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
2927     do_st4_mmu(env, addr, val, oi, retaddr);
2928 }
2929 
2930 static void do_st8_mmu(CPUArchState *env, vaddr addr, uint64_t val,
2931                        MemOpIdx oi, uintptr_t ra)
2932 {
2933     MMULookupLocals l;
2934     bool crosspage;
2935 
2936     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2937     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
2938     if (likely(!crosspage)) {
2939         do_st_8(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
2940         return;
2941     }
2942 
2943     /* Swap to little endian for simplicity, then store by bytes. */
2944     if ((l.memop & MO_BSWAP) != MO_LE) {
2945         val = bswap64(val);
2946     }
2947     val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
2948     (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
2949 }
2950 
2951 void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
2952                     MemOpIdx oi, uintptr_t retaddr)
2953 {
2954     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
2955     do_st8_mmu(env, addr, val, oi, retaddr);
2956 }
2957 
2958 static void do_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
2959                         MemOpIdx oi, uintptr_t ra)
2960 {
2961     MMULookupLocals l;
2962     bool crosspage;
2963     uint64_t a, b;
2964     int first;
2965 
2966     cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
2967     crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
2968     if (likely(!crosspage)) {
2969         /* Swap to host endian if necessary, then store. */
2970         if (l.memop & MO_BSWAP) {
2971             val = bswap128(val);
2972         }
2973         if (unlikely(l.page[0].flags & TLB_MMIO)) {
2974             QEMU_IOTHREAD_LOCK_GUARD();
2975             if (HOST_BIG_ENDIAN) {
2976                 b = int128_getlo(val), a = int128_gethi(val);
2977             } else {
2978                 a = int128_getlo(val), b = int128_gethi(val);
2979             }
2980             io_writex(env, l.page[0].full, l.mmu_idx, a, addr, ra, MO_64);
2981             io_writex(env, l.page[0].full, l.mmu_idx, b, addr + 8, ra, MO_64);
2982         } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
2983             /* nothing */
2984         } else {
2985             store_atom_16(env, ra, l.page[0].haddr, l.memop, val);
2986         }
2987         return;
2988     }
2989 
2990     first = l.page[0].size;
2991     if (first == 8) {
2992         MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64;
2993 
2994         if (l.memop & MO_BSWAP) {
2995             val = bswap128(val);
2996         }
2997         if (HOST_BIG_ENDIAN) {
2998             b = int128_getlo(val), a = int128_gethi(val);
2999         } else {
3000             a = int128_getlo(val), b = int128_gethi(val);
3001         }
3002         do_st_8(env, &l.page[0], a, l.mmu_idx, mop8, ra);
3003         do_st_8(env, &l.page[1], b, l.mmu_idx, mop8, ra);
3004         return;
3005     }
3006 
3007     if ((l.memop & MO_BSWAP) != MO_LE) {
3008         val = bswap128(val);
3009     }
3010     if (first < 8) {
3011         do_st_leN(env, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
3012         val = int128_urshift(val, first * 8);
3013         do_st16_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
3014     } else {
3015         b = do_st16_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
3016         do_st_leN(env, &l.page[1], b, l.mmu_idx, l.memop, ra);
3017     }
3018 }
3019 
3020 void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
3021                      MemOpIdx oi, uintptr_t retaddr)
3022 {
3023     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
3024     do_st16_mmu(env, addr, val, oi, retaddr);
3025 }
3026 
3027 void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
3028 {
3029     helper_st16_mmu(env, addr, val, oi, GETPC());
3030 }
3031 
3032 /*
3033  * Store Helpers for cpu_ldst.h
3034  */
3035 
3036 static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
3037 {
3038     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
3039 }
3040 
3041 void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
3042                  MemOpIdx oi, uintptr_t retaddr)
3043 {
3044     helper_stb_mmu(env, addr, val, oi, retaddr);
3045     plugin_store_cb(env, addr, oi);
3046 }
3047 
3048 void cpu_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
3049                  MemOpIdx oi, uintptr_t retaddr)
3050 {
3051     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
3052     do_st2_mmu(env, addr, val, oi, retaddr);
3053     plugin_store_cb(env, addr, oi);
3054 }
3055 
3056 void cpu_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
3057                  MemOpIdx oi, uintptr_t retaddr)
3058 {
3059     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
3060     do_st4_mmu(env, addr, val, oi, retaddr);
3061     plugin_store_cb(env, addr, oi);
3062 }
3063 
3064 void cpu_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
3065                  MemOpIdx oi, uintptr_t retaddr)
3066 {
3067     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
3068     do_st8_mmu(env, addr, val, oi, retaddr);
3069     plugin_store_cb(env, addr, oi);
3070 }
3071 
3072 void cpu_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val,
3073                   MemOpIdx oi, uintptr_t retaddr)
3074 {
3075     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
3076     do_st16_mmu(env, addr, val, oi, retaddr);
3077     plugin_store_cb(env, addr, oi);
3078 }
3079 
3080 #include "ldst_common.c.inc"
3081 
3082 /*
3083  * First set of functions passes in OI and RETADDR.
3084  * This makes them callable from other helpers.
3085  */
3086 
3087 #define ATOMIC_NAME(X) \
3088     glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
3089 
3090 #define ATOMIC_MMU_CLEANUP
3091 
3092 #include "atomic_common.c.inc"
3093 
3094 #define DATA_SIZE 1
3095 #include "atomic_template.h"
3096 
3097 #define DATA_SIZE 2
3098 #include "atomic_template.h"
3099 
3100 #define DATA_SIZE 4
3101 #include "atomic_template.h"
3102 
3103 #ifdef CONFIG_ATOMIC64
3104 #define DATA_SIZE 8
3105 #include "atomic_template.h"
3106 #endif
3107 
3108 #if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
3109 #define DATA_SIZE 16
3110 #include "atomic_template.h"
3111 #endif
3112 
3113 /* Code access functions.  */
3114 
3115 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
3116 {
3117     MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
3118     return do_ld1_mmu(env, addr, oi, 0, MMU_INST_FETCH);
3119 }
3120 
3121 uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
3122 {
3123     MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
3124     return do_ld2_mmu(env, addr, oi, 0, MMU_INST_FETCH);
3125 }
3126 
3127 uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
3128 {
3129     MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
3130     return do_ld4_mmu(env, addr, oi, 0, MMU_INST_FETCH);
3131 }
3132 
3133 uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
3134 {
3135     MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
3136     return do_ld8_mmu(env, addr, oi, 0, MMU_INST_FETCH);
3137 }
3138 
3139 uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
3140                          MemOpIdx oi, uintptr_t retaddr)
3141 {
3142     return do_ld1_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
3143 }
3144 
3145 uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
3146                           MemOpIdx oi, uintptr_t retaddr)
3147 {
3148     return do_ld2_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
3149 }
3150 
3151 uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
3152                           MemOpIdx oi, uintptr_t retaddr)
3153 {
3154     return do_ld4_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
3155 }
3156 
3157 uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
3158                           MemOpIdx oi, uintptr_t retaddr)
3159 {
3160     return do_ld8_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
3161 }
3162