xref: /openbmc/qemu/accel/tcg/cputlb.c (revision 4b9fa0b4)
1 /*
2  *  Common CPU TLB handling
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/main-loop.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "exec/memory.h"
25 #include "exec/address-spaces.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/cputlb.h"
28 #include "exec/memory-internal.h"
29 #include "exec/ram_addr.h"
30 #include "tcg/tcg.h"
31 #include "qemu/error-report.h"
32 #include "exec/log.h"
33 #include "exec/helper-proto.h"
34 #include "qemu/atomic.h"
35 #include "qemu/atomic128.h"
36 #include "translate-all.h"
37 #ifdef CONFIG_PLUGIN
38 #include "qemu/plugin-memory.h"
39 #endif
40 
41 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
42 /* #define DEBUG_TLB */
43 /* #define DEBUG_TLB_LOG */
44 
45 #ifdef DEBUG_TLB
46 # define DEBUG_TLB_GATE 1
47 # ifdef DEBUG_TLB_LOG
48 #  define DEBUG_TLB_LOG_GATE 1
49 # else
50 #  define DEBUG_TLB_LOG_GATE 0
51 # endif
52 #else
53 # define DEBUG_TLB_GATE 0
54 # define DEBUG_TLB_LOG_GATE 0
55 #endif
56 
57 #define tlb_debug(fmt, ...) do { \
58     if (DEBUG_TLB_LOG_GATE) { \
59         qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
60                       ## __VA_ARGS__); \
61     } else if (DEBUG_TLB_GATE) { \
62         fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
63     } \
64 } while (0)
65 
66 #define assert_cpu_is_self(cpu) do {                              \
67         if (DEBUG_TLB_GATE) {                                     \
68             g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
69         }                                                         \
70     } while (0)
71 
72 /* run_on_cpu_data.target_ptr should always be big enough for a
73  * target_ulong even on 32 bit builds */
74 QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
75 
76 /* We currently can't handle more than 16 bits in the MMUIDX bitmask.
77  */
78 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
79 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
80 
81 static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
82 {
83     return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
84 }
85 
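/* Restart the TLB sizing window of @desc at time @ns, recording
 * @max_entries as the maximum usage seen so far. */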
86 static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
87                              size_t max_entries)
88 {
89     desc->window_begin_ns = ns;
90     desc->window_max_entries = max_entries;
91 }
92 
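/* Allocate each per-mmu-idx TLB (and its iotlb) at the default dynamic
 * size and reset its sizing window and usage counter. */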
93 static void tlb_dyn_init(CPUArchState *env)
94 {
95     int i;
96 
97     for (i = 0; i < NB_MMU_MODES; i++) {
98         CPUTLBDesc *desc = &env_tlb(env)->d[i];
99         size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
100 
101         tlb_window_reset(desc, get_clock_realtime(), 0);
102         desc->n_used_entries = 0;
103         env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
104         env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
105         env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
106     }
107 }
108 
109 /**
110  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
111  * @env: CPU that owns the TLB
112  * @mmu_idx: MMU index of the TLB
113  *
114  * Called with tlb_lock held.
115  *
116  * We have two main constraints when resizing a TLB: (1) we only resize it
117  * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
118  * the array or unnecessarily flushing it), which means we do not control how
119  * frequently the resizing can occur; (2) we don't have access to the guest's
120  * future scheduling decisions, and therefore have to decide the magnitude of
121  * the resize based on past observations.
122  *
123  * In general, a memory-hungry process can benefit greatly from an appropriately
124  * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
125  * we just have to make the TLB as large as possible; while an oversized TLB
126  * results in minimal TLB miss rates, it also takes longer to be flushed
127  * (flushes can be _very_ frequent), and the reduced locality can also hurt
128  * performance.
129  *
130  * To achieve near-optimal performance for all kinds of workloads, we:
131  *
132  * 1. Aggressively increase the size of the TLB when the use rate of the
133  * TLB being flushed is high, since it is likely that in the near future this
134  * memory-hungry process will execute again, and its memory hungriness will
135  * probably be similar.
136  *
137  * 2. Slowly reduce the size of the TLB as the use rate declines over a
138  * reasonably large time window. The rationale is that if in such a time window
139  * we have not observed a high TLB use rate, it is likely that we won't observe
140  * it in the near future. In that case, once a time window expires we downsize
141  * the TLB to match the maximum use rate observed in the window.
142  *
143  * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
144  * since in that range performance is likely near-optimal. Recall that the TLB
145  * is direct mapped, so we want the use rate to be low (or at least not too
146  * high), since otherwise we are likely to have a significant amount of
147  * conflict misses.
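 *
 * For example, assuming CPU_TLB_DYN_MIN_BITS and CPU_TLB_DYN_MAX_BITS
 * permit it: a 1024-entry TLB whose window peaked at 800 used entries
 * (78% use rate) is doubled to 2048 entries, whereas one whose expired
 * window peaked at 80 used entries (under 30%) is shrunk to
 * pow2ceil(80) == 128 entries, keeping the expected rate of
 * 80/128 == 62% below the 70% threshold.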
148  */
149 static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
150 {
151     CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
152     size_t old_size = tlb_n_entries(env, mmu_idx);
153     size_t rate;
154     size_t new_size = old_size;
155     int64_t now = get_clock_realtime();
156     int64_t window_len_ms = 100;
157     int64_t window_len_ns = window_len_ms * 1000 * 1000;
158     bool window_expired = now > desc->window_begin_ns + window_len_ns;
159 
160     if (desc->n_used_entries > desc->window_max_entries) {
161         desc->window_max_entries = desc->n_used_entries;
162     }
163     rate = desc->window_max_entries * 100 / old_size;
164 
165     if (rate > 70) {
166         new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
167     } else if (rate < 30 && window_expired) {
168         size_t ceil = pow2ceil(desc->window_max_entries);
169         size_t expected_rate = desc->window_max_entries * 100 / ceil;
170 
171         /*
172          * Avoid undersizing when the max number of entries seen is just below
173          * a pow2. For instance, if max_entries == 1025, the expected use rate
174          * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
175          * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
176          * later. Thus, make sure that the expected use rate remains below 70%
177          * (and since we double the size, that means the lowest rate we'd
178          * expect to get is 35%, which is still in the 30-70% range where
179          * we consider that the size is appropriate).
180          */
181         if (expected_rate > 70) {
182             ceil *= 2;
183         }
184         new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
185     }
186 
187     if (new_size == old_size) {
188         if (window_expired) {
189             tlb_window_reset(desc, now, desc->n_used_entries);
190         }
191         return;
192     }
193 
194     g_free(env_tlb(env)->f[mmu_idx].table);
195     g_free(env_tlb(env)->d[mmu_idx].iotlb);
196 
197     tlb_window_reset(desc, now, 0);
198     /* desc->n_used_entries is cleared by the caller */
199     env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
200     env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
201     env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
202     /*
203      * If the allocations fail, try smaller sizes. We just freed some
204      * memory, so going back to half of new_size has a good chance of working.
205      * Increased memory pressure elsewhere in the system might cause the
206      * allocations to fail though, so we progressively reduce the allocation
207      * size, aborting if we cannot even allocate the smallest TLB we support.
208      */
209     while (env_tlb(env)->f[mmu_idx].table == NULL ||
210            env_tlb(env)->d[mmu_idx].iotlb == NULL) {
211         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
212             error_report("%s: %s", __func__, strerror(errno));
213             abort();
214         }
215         new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
216         env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
217 
218         g_free(env_tlb(env)->f[mmu_idx].table);
219         g_free(env_tlb(env)->d[mmu_idx].iotlb);
220         env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
221         env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
222     }
223 }
224 
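/* Called with tlb_c.lock held: resize the table for @mmu_idx if the
 * sizing heuristics ask for it, then invalidate every entry. */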
225 static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
226 {
227     tlb_mmu_resize_locked(env, mmu_idx);
228     memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
229     env_tlb(env)->d[mmu_idx].n_used_entries = 0;
230 }
231 
232 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
233 {
234     env_tlb(env)->d[mmu_idx].n_used_entries++;
235 }
236 
237 static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
238 {
239     env_tlb(env)->d[mmu_idx].n_used_entries--;
240 }
241 
242 void tlb_init(CPUState *cpu)
243 {
244     CPUArchState *env = cpu->env_ptr;
245 
246     qemu_spin_init(&env_tlb(env)->c.lock);
247 
248     /* Ensure that cpu_reset performs a full flush.  */
249     env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;
250 
251     tlb_dyn_init(env);
252 }
253 
254 /* flush_all_helper: run fn across all cpus except the source cpu
255  *
256  * The helper is queued as asynchronous work on every cpu other than
257  * @src; the caller is responsible for running fn on the source cpu
258  * itself, either directly or via async_safe_run_on_cpu() to create a
259  * synchronisation point where all queued work is finished first.
260  */
261 static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
262                              run_on_cpu_data d)
263 {
264     CPUState *cpu;
265 
266     CPU_FOREACH(cpu) {
267         if (cpu != src) {
268             async_run_on_cpu(cpu, fn, d);
269         }
270     }
271 }
272 
273 void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
274 {
275     CPUState *cpu;
276     size_t full = 0, part = 0, elide = 0;
277 
278     CPU_FOREACH(cpu) {
279         CPUArchState *env = cpu->env_ptr;
280 
281         full += atomic_read(&env_tlb(env)->c.full_flush_count);
282         part += atomic_read(&env_tlb(env)->c.part_flush_count);
283         elide += atomic_read(&env_tlb(env)->c.elide_flush_count);
284     }
285     *pfull = full;
286     *ppart = part;
287     *pelide = elide;
288 }
289 
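/* Called with tlb_c.lock held: flush the main table, the victim TLB and
 * the large-page tracking state of a single mmu_idx. */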
290 static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
291 {
292     tlb_table_flush_by_mmuidx(env, mmu_idx);
293     env_tlb(env)->d[mmu_idx].large_page_addr = -1;
294     env_tlb(env)->d[mmu_idx].large_page_mask = -1;
295     env_tlb(env)->d[mmu_idx].vindex = 0;
296     memset(env_tlb(env)->d[mmu_idx].vtable, -1,
297            sizeof(env_tlb(env)->d[0].vtable));
298 }
299 
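/* Flush, on the current cpu, every mmu_idx that is both requested in
 * data.host_int and marked dirty; flushes of clean indexes are elided
 * and the flush statistics updated accordingly. */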
300 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
301 {
302     CPUArchState *env = cpu->env_ptr;
303     uint16_t asked = data.host_int;
304     uint16_t all_dirty, work, to_clean;
305 
306     assert_cpu_is_self(cpu);
307 
308     tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
309 
310     qemu_spin_lock(&env_tlb(env)->c.lock);
311 
312     all_dirty = env_tlb(env)->c.dirty;
313     to_clean = asked & all_dirty;
314     all_dirty &= ~to_clean;
315     env_tlb(env)->c.dirty = all_dirty;
316 
317     for (work = to_clean; work != 0; work &= work - 1) {
318         int mmu_idx = ctz32(work);
319         tlb_flush_one_mmuidx_locked(env, mmu_idx);
320     }
321 
322     qemu_spin_unlock(&env_tlb(env)->c.lock);
323 
324     cpu_tb_jmp_cache_clear(cpu);
325 
326     if (to_clean == ALL_MMUIDX_BITS) {
327         atomic_set(&env_tlb(env)->c.full_flush_count,
328                    env_tlb(env)->c.full_flush_count + 1);
329     } else {
330         atomic_set(&env_tlb(env)->c.part_flush_count,
331                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
332         if (to_clean != asked) {
333             atomic_set(&env_tlb(env)->c.elide_flush_count,
334                        env_tlb(env)->c.elide_flush_count +
335                        ctpop16(asked & ~to_clean));
336         }
337     }
338 }
339 
340 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
341 {
342     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
343 
344     if (cpu->created && !qemu_cpu_is_self(cpu)) {
345         async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
346                          RUN_ON_CPU_HOST_INT(idxmap));
347     } else {
348         tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
349     }
350 }
351 
352 void tlb_flush(CPUState *cpu)
353 {
354     tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
355 }
356 
357 void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
358 {
359     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
360 
361     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
362 
363     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
364     fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
365 }
366 
367 void tlb_flush_all_cpus(CPUState *src_cpu)
368 {
369     tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
370 }
371 
372 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
373 {
374     const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
375 
376     tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
377 
378     flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
379     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
380 }
381 
382 void tlb_flush_all_cpus_synced(CPUState *src_cpu)
383 {
384     tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
385 }
386 
387 static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
388                                         target_ulong page)
389 {
390     return tlb_hit_page(tlb_entry->addr_read, page) ||
391            tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
392            tlb_hit_page(tlb_entry->addr_code, page);
393 }
394 
395 /**
396  * tlb_entry_is_empty - return true if the entry is not in use
397  * @te: pointer to CPUTLBEntry
398  */
399 static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
400 {
401     return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
402 }
403 
404 /* Called with tlb_c.lock held */
405 static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
406                                           target_ulong page)
407 {
408     if (tlb_hit_page_anyprot(tlb_entry, page)) {
409         memset(tlb_entry, -1, sizeof(*tlb_entry));
410         return true;
411     }
412     return false;
413 }
414 
415 /* Called with tlb_c.lock held */
416 static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
417                                               target_ulong page)
418 {
419     CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
420     int k;
421 
422     assert_cpu_is_self(env_cpu(env));
423     for (k = 0; k < CPU_VTLB_SIZE; k++) {
424         if (tlb_flush_entry_locked(&d->vtable[k], page)) {
425             tlb_n_used_entries_dec(env, mmu_idx);
426         }
427     }
428 }
429 
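/* Called with tlb_c.lock held: flush a single page from mmu_idx @midx,
 * falling back to a full flush of that index when the page lies inside
 * the recorded large-page range. */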
430 static void tlb_flush_page_locked(CPUArchState *env, int midx,
431                                   target_ulong page)
432 {
433     target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
434     target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;
435 
436     /* Check if we need to flush due to large pages.  */
437     if ((page & lp_mask) == lp_addr) {
438         tlb_debug("forcing full flush midx %d ("
439                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
440                   midx, lp_addr, lp_mask);
441         tlb_flush_one_mmuidx_locked(env, midx);
442     } else {
443         if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
444             tlb_n_used_entries_dec(env, midx);
445         }
446         tlb_flush_vtlb_page_locked(env, midx, page);
447     }
448 }
449 
450 /* As we are going to hijack the bottom bits of the page address for an
451  * mmuidx bit mask, we need to fail the build if we can't do that.
452  */
453 QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
454 
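/* The page address and the mmu_idx bitmap arrive packed into a single
 * target_ptr: the low TARGET_PAGE_BITS carry the bitmap (see the
 * build-time assertion above), the remaining bits the page address. */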
455 static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
456                                                 run_on_cpu_data data)
457 {
458     CPUArchState *env = cpu->env_ptr;
459     target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
460     target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
461     unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
462     int mmu_idx;
463 
464     assert_cpu_is_self(cpu);
465 
466     tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
467               addr, mmu_idx_bitmap);
468 
469     qemu_spin_lock(&env_tlb(env)->c.lock);
470     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
471         if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
472             tlb_flush_page_locked(env, mmu_idx, addr);
473         }
474     }
475     qemu_spin_unlock(&env_tlb(env)->c.lock);
476 
477     tb_flush_jmp_cache(cpu, addr);
478 }
479 
480 void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
481 {
482     target_ulong addr_and_mmu_idx;
483 
484     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
485 
486     /* This should already be page aligned */
487     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
488     addr_and_mmu_idx |= idxmap;
489 
490     if (!qemu_cpu_is_self(cpu)) {
491         async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
492                          RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
493     } else {
494         tlb_flush_page_by_mmuidx_async_work(
495             cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
496     }
497 }
498 
499 void tlb_flush_page(CPUState *cpu, target_ulong addr)
500 {
501     tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
502 }
503 
504 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
505                                        uint16_t idxmap)
506 {
507     const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
508     target_ulong addr_and_mmu_idx;
509 
510     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
511 
512     /* This should already be page aligned */
513     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
514     addr_and_mmu_idx |= idxmap;
515 
516     flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
517     fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
518 }
519 
520 void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
521 {
522     tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
523 }
524 
525 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
526                                               target_ulong addr,
527                                               uint16_t idxmap)
528 {
529     const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
530     target_ulong addr_and_mmu_idx;
531 
532     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
533 
534     /* This should already be page aligned */
535     addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
536     addr_and_mmu_idx |= idxmap;
537 
538     flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
539     async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
540 }
541 
542 void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
543 {
544     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
545 }
546 
547 /* update the TLBs so that writes to code in the RAM page 'ram_addr'
548    can be detected */
549 void tlb_protect_code(ram_addr_t ram_addr)
550 {
551     cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
552                                              DIRTY_MEMORY_CODE);
553 }
554 
555 /* update the TLB so that writes in physical page 'ram_addr' are no longer
556    tested for self-modifying code */
557 void tlb_unprotect_code(ram_addr_t ram_addr)
558 {
559     cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
560 }
561 
562 
563 /*
564  * Dirty write flag handling
565  *
566  * When the TCG code writes to a location it looks up the address in
567  * the TLB and uses that data to compute the final address. If any of
568  * the lower bits of the address are set then the slow path is forced.
569  * There are a number of reasons to do this but for normal RAM the
570  * most usual is detecting writes to code regions which may invalidate
571  * generated code.
572  *
573  * Other vCPUs might be reading their TLBs during guest execution, so we update
574  * te->addr_write with atomic_set. We don't need to worry about this for
575  * oversized guests as MTTCG is disabled for them.
576  *
577  * Called with tlb_c.lock held.
578  */
579 static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
580                                          uintptr_t start, uintptr_t length)
581 {
582     uintptr_t addr = tlb_entry->addr_write;
583 
584     if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
585                  TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
586         addr &= TARGET_PAGE_MASK;
587         addr += tlb_entry->addend;
588         if ((addr - start) < length) {
589 #if TCG_OVERSIZED_GUEST
590             tlb_entry->addr_write |= TLB_NOTDIRTY;
591 #else
592             atomic_set(&tlb_entry->addr_write,
593                        tlb_entry->addr_write | TLB_NOTDIRTY);
594 #endif
595         }
596     }
597 }
598 
599 /*
600  * Called with tlb_c.lock held.
601  * Called only from the vCPU context, i.e. the TLB's owner thread.
602  */
603 static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
604 {
605     *d = *s;
606 }
607 
608 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
609  * the target vCPU).
610  * We must take tlb_c.lock to avoid racing with another vCPU update. The only
611  * thing actually updated is the target TLB entry ->addr_write flags.
612  */
613 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
614 {
615     CPUArchState *env;
616 
617     int mmu_idx;
618 
619     env = cpu->env_ptr;
620     qemu_spin_lock(&env_tlb(env)->c.lock);
621     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
622         unsigned int i;
623         unsigned int n = tlb_n_entries(env, mmu_idx);
624 
625         for (i = 0; i < n; i++) {
626             tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
627                                          start1, length);
628         }
629 
630         for (i = 0; i < CPU_VTLB_SIZE; i++) {
631             tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
632                                          start1, length);
633         }
634     }
635     qemu_spin_unlock(&env_tlb(env)->c.lock);
636 }
637 
638 /* Called with tlb_c.lock held */
639 static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
640                                          target_ulong vaddr)
641 {
642     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
643         tlb_entry->addr_write = vaddr;
644     }
645 }
646 
647 /* update the TLB corresponding to virtual page vaddr
648    so that writes to it no longer take the TLB_NOTDIRTY slow path */
649 void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
650 {
651     CPUArchState *env = cpu->env_ptr;
652     int mmu_idx;
653 
654     assert_cpu_is_self(cpu);
655 
656     vaddr &= TARGET_PAGE_MASK;
657     qemu_spin_lock(&env_tlb(env)->c.lock);
658     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
659         tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
660     }
661 
662     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
663         int k;
664         for (k = 0; k < CPU_VTLB_SIZE; k++) {
665             tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
666         }
667     }
668     qemu_spin_unlock(&env_tlb(env)->c.lock);
669 }
670 
671 /* Our TLB does not support large pages, so remember the area covered by
672    large pages and trigger a full TLB flush if these are invalidated.  */
673 static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
674                                target_ulong vaddr, target_ulong size)
675 {
676     target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
677     target_ulong lp_mask = ~(size - 1);
678 
679     if (lp_addr == (target_ulong)-1) {
680         /* No previous large page.  */
681         lp_addr = vaddr;
682     } else {
683         /* Extend the existing region to include the new page.
684            This is a compromise between unnecessary flushes and
685            the cost of maintaining a full variable size TLB.  */
686         lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
687         while (((lp_addr ^ vaddr) & lp_mask) != 0) {
688             lp_mask <<= 1;
689         }
690     }
691     env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
692     env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
693 }
694 
695 /* Add a new TLB entry. At most one entry for a given virtual address
696  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
697  * supplied size is only used by tlb_flush_page.
698  *
699  * Called from TCG-generated code, which is under an RCU read-side
700  * critical section.
701  */
702 void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
703                              hwaddr paddr, MemTxAttrs attrs, int prot,
704                              int mmu_idx, target_ulong size)
705 {
706     CPUArchState *env = cpu->env_ptr;
707     CPUTLB *tlb = env_tlb(env);
708     CPUTLBDesc *desc = &tlb->d[mmu_idx];
709     MemoryRegionSection *section;
710     unsigned int index;
711     target_ulong address;
712     target_ulong write_address;
713     uintptr_t addend;
714     CPUTLBEntry *te, tn;
715     hwaddr iotlb, xlat, sz, paddr_page;
716     target_ulong vaddr_page;
717     int asidx = cpu_asidx_from_attrs(cpu, attrs);
718     int wp_flags;
719     bool is_ram, is_romd;
720 
721     assert_cpu_is_self(cpu);
722 
723     if (size <= TARGET_PAGE_SIZE) {
724         sz = TARGET_PAGE_SIZE;
725     } else {
726         tlb_add_large_page(env, mmu_idx, vaddr, size);
727         sz = size;
728     }
729     vaddr_page = vaddr & TARGET_PAGE_MASK;
730     paddr_page = paddr & TARGET_PAGE_MASK;
731 
732     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
733                                                 &xlat, &sz, attrs, &prot);
734     assert(sz >= TARGET_PAGE_SIZE);
735 
736     tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
737               " prot=%x idx=%d\n",
738               vaddr, paddr, prot, mmu_idx);
739 
740     address = vaddr_page;
741     if (size < TARGET_PAGE_SIZE) {
742         /* Repeat the MMU check and TLB fill on every access.  */
743         address |= TLB_INVALID_MASK;
744     }
745     if (attrs.byte_swap) {
746         address |= TLB_BSWAP;
747     }
748 
749     is_ram = memory_region_is_ram(section->mr);
750     is_romd = memory_region_is_romd(section->mr);
751 
752     if (is_ram || is_romd) {
753         /* RAM and ROMD both have associated host memory. */
754         addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
755     } else {
756         /* I/O does not; force the host address to NULL. */
757         addend = 0;
758     }
759 
760     write_address = address;
761     if (is_ram) {
762         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
763         /*
764          * Computing is_clean is expensive; avoid all that unless
765          * the page is actually writable.
766          */
767         if (prot & PAGE_WRITE) {
768             if (section->readonly) {
769                 write_address |= TLB_DISCARD_WRITE;
770             } else if (cpu_physical_memory_is_clean(iotlb)) {
771                 write_address |= TLB_NOTDIRTY;
772             }
773         }
774     } else {
775         /* I/O or ROMD */
776         iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
777         /*
778          * Writes to romd devices must go through MMIO to enable write.
779          * Reads to romd devices go through the ram_ptr found above,
780          * but of course reads to I/O must go through MMIO.
781          */
782         write_address |= TLB_MMIO;
783         if (!is_romd) {
784             address = write_address;
785         }
786     }
787 
788     wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
789                                               TARGET_PAGE_SIZE);
790 
791     index = tlb_index(env, mmu_idx, vaddr_page);
792     te = tlb_entry(env, mmu_idx, vaddr_page);
793 
794     /*
795      * Hold the TLB lock for the rest of the function. We could acquire/release
796      * the lock several times in the function, but it is faster to amortize the
797      * acquisition cost by acquiring it just once. Note that this leads to
798      * a longer critical section, but this is not a concern since the TLB lock
799      * is unlikely to be contended.
800      */
801     qemu_spin_lock(&tlb->c.lock);
802 
803     /* Note that the tlb is no longer clean.  */
804     tlb->c.dirty |= 1 << mmu_idx;
805 
806     /* Make sure there's no cached translation for the new page.  */
807     tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
808 
809     /*
810      * Only evict the old entry to the victim tlb if it's for a
811      * different page; otherwise just overwrite the stale data.
812      */
813     if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
814         unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
815         CPUTLBEntry *tv = &desc->vtable[vidx];
816 
817         /* Evict the old entry into the victim tlb.  */
818         copy_tlb_helper_locked(tv, te);
819         desc->viotlb[vidx] = desc->iotlb[index];
820         tlb_n_used_entries_dec(env, mmu_idx);
821     }
822 
823     /* refill the tlb */
824     /*
825      * At this point iotlb contains a physical section number in the lower
826      * TARGET_PAGE_BITS, and either
827      *  + the ram_addr_t of the page base of the target RAM (RAM)
828      *  + the offset within section->mr of the page base (I/O, ROMD)
829      * We subtract the vaddr_page (which is page aligned and thus won't
830      * disturb the low bits) to give an offset which can be added to the
831      * (non-page-aligned) vaddr of the eventual memory access to get
832      * the MemoryRegion offset for the access. Note that the vaddr we
833      * subtract here is that of the page base, and not the same as the
834      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
835      */
836     desc->iotlb[index].addr = iotlb - vaddr_page;
837     desc->iotlb[index].attrs = attrs;
838 
839     /* Now calculate the new entry */
840     tn.addend = addend - vaddr_page;
841     if (prot & PAGE_READ) {
842         tn.addr_read = address;
843         if (wp_flags & BP_MEM_READ) {
844             tn.addr_read |= TLB_WATCHPOINT;
845         }
846     } else {
847         tn.addr_read = -1;
848     }
849 
850     if (prot & PAGE_EXEC) {
851         tn.addr_code = address;
852     } else {
853         tn.addr_code = -1;
854     }
855 
856     tn.addr_write = -1;
857     if (prot & PAGE_WRITE) {
858         tn.addr_write = write_address;
859         if (prot & PAGE_WRITE_INV) {
860             tn.addr_write |= TLB_INVALID_MASK;
861         }
862         if (wp_flags & BP_MEM_WRITE) {
863             tn.addr_write |= TLB_WATCHPOINT;
864         }
865     }
866 
867     copy_tlb_helper_locked(te, &tn);
868     tlb_n_used_entries_inc(env, mmu_idx);
869     qemu_spin_unlock(&tlb->c.lock);
870 }
871 
872 /* Add a new TLB entry, but without specifying the memory
873  * transaction attributes to be used.
874  */
875 void tlb_set_page(CPUState *cpu, target_ulong vaddr,
876                   hwaddr paddr, int prot,
877                   int mmu_idx, target_ulong size)
878 {
879     tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
880                             prot, mmu_idx, size);
881 }
882 
883 static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
884 {
885     ram_addr_t ram_addr;
886 
887     ram_addr = qemu_ram_addr_from_host(ptr);
888     if (ram_addr == RAM_ADDR_INVALID) {
889         error_report("Bad ram pointer %p", ptr);
890         abort();
891     }
892     return ram_addr;
893 }
894 
895 /*
896  * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
897  * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
898  * be discarded and looked up again (e.g. via tlb_entry()).
899  */
900 static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
901                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
902 {
903     CPUClass *cc = CPU_GET_CLASS(cpu);
904     bool ok;
905 
906     /*
907      * This is not a probe, so only valid return is success; failure
908      * should result in exception + longjmp to the cpu loop.
909      */
910     ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
911     assert(ok);
912 }
913 
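/* Perform a load through the memory API for an I/O (or ROMD) page,
 * taking the iothread lock when the region requires it and reporting
 * any transaction failure back to the guest. */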
914 static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
915                          int mmu_idx, target_ulong addr, uintptr_t retaddr,
916                          MMUAccessType access_type, MemOp op)
917 {
918     CPUState *cpu = env_cpu(env);
919     hwaddr mr_offset;
920     MemoryRegionSection *section;
921     MemoryRegion *mr;
922     uint64_t val;
923     bool locked = false;
924     MemTxResult r;
925 
926     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
927     mr = section->mr;
928     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
929     cpu->mem_io_pc = retaddr;
930     if (!cpu->can_do_io) {
931         cpu_io_recompile(cpu, retaddr);
932     }
933 
934     cpu->mem_io_access_type = access_type;
935 
936     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
937         qemu_mutex_lock_iothread();
938         locked = true;
939     }
940     r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
941     if (r != MEMTX_OK) {
942         hwaddr physaddr = mr_offset +
943             section->offset_within_address_space -
944             section->offset_within_region;
945 
946         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
947                                mmu_idx, iotlbentry->attrs, r, retaddr);
948     }
949     if (locked) {
950         qemu_mutex_unlock_iothread();
951     }
952 
953     return val;
954 }
955 
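/* Store counterpart of io_readx: dispatch the write through the memory
 * API with the same locking and transaction-failure handling. */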
956 static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
957                       int mmu_idx, uint64_t val, target_ulong addr,
958                       uintptr_t retaddr, MemOp op)
959 {
960     CPUState *cpu = env_cpu(env);
961     hwaddr mr_offset;
962     MemoryRegionSection *section;
963     MemoryRegion *mr;
964     bool locked = false;
965     MemTxResult r;
966 
967     section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
968     mr = section->mr;
969     mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
970     if (!cpu->can_do_io) {
971         cpu_io_recompile(cpu, retaddr);
972     }
973     cpu->mem_io_pc = retaddr;
974 
975     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
976         qemu_mutex_lock_iothread();
977         locked = true;
978     }
979     r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
980     if (r != MEMTX_OK) {
981         hwaddr physaddr = mr_offset +
982             section->offset_within_address_space -
983             section->offset_within_region;
984 
985         cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
986                                MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
987                                retaddr);
988     }
989     if (locked) {
990         qemu_mutex_unlock_iothread();
991     }
992 }
993 
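/* Read one comparator field of @entry, located at byte offset @ofs. */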
994 static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
995 {
996 #if TCG_OVERSIZED_GUEST
997     return *(target_ulong *)((uintptr_t)entry + ofs);
998 #else
999     /* ofs might correspond to .addr_write, so use atomic_read */
1000     return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
1001 #endif
1002 }
1003 
1004 /* Return true if ADDR is present in the victim tlb, and has been copied
1005    back to the main tlb.  */
1006 static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
1007                            size_t elt_ofs, target_ulong page)
1008 {
1009     size_t vidx;
1010 
1011     assert_cpu_is_self(env_cpu(env));
1012     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
1013         CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
1014         target_ulong cmp;
1015 
1016         /* elt_ofs might correspond to .addr_write, so use atomic_read */
1017 #if TCG_OVERSIZED_GUEST
1018         cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
1019 #else
1020         cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
1021 #endif
1022 
1023         if (cmp == page) {
1024             /* Found entry in victim tlb, swap tlb and iotlb.  */
1025             CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
1026 
1027             qemu_spin_lock(&env_tlb(env)->c.lock);
1028             copy_tlb_helper_locked(&tmptlb, tlb);
1029             copy_tlb_helper_locked(tlb, vtlb);
1030             copy_tlb_helper_locked(vtlb, &tmptlb);
1031             qemu_spin_unlock(&env_tlb(env)->c.lock);
1032 
1033             CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
1034             CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
1035             tmpio = *io; *io = *vio; *vio = tmpio;
1036             return true;
1037         }
1038     }
1039     return false;
1040 }
1041 
1042 /* Macro to call the above, with local variables from the use context.  */
1043 #define VICTIM_TLB_HIT(TY, ADDR) \
1044   victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
1045                  (ADDR) & TARGET_PAGE_MASK)
1046 
1047 /*
1048  * Return a ram_addr_t for the virtual address for execution.
1049  *
1050  * Return -1 if we can't translate and execute from an entire page
1051  * of RAM.  This will force us to execute by loading and translating
1052  * one insn at a time, without caching.
1053  *
1054  * NOTE: This function will trigger an exception if the page is
1055  * not executable.
1056  */
1057 tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
1058                                         void **hostp)
1059 {
1060     uintptr_t mmu_idx = cpu_mmu_index(env, true);
1061     uintptr_t index = tlb_index(env, mmu_idx, addr);
1062     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1063     void *p;
1064 
1065     if (unlikely(!tlb_hit(entry->addr_code, addr))) {
1066         if (!VICTIM_TLB_HIT(addr_code, addr)) {
1067             tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
1068             index = tlb_index(env, mmu_idx, addr);
1069             entry = tlb_entry(env, mmu_idx, addr);
1070 
1071             if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
1072                 /*
1073                  * The MMU protection covers a smaller range than a target
1074                  * page, so we must redo the MMU check for every insn.
1075                  */
1076                 return -1;
1077             }
1078         }
1079         assert(tlb_hit(entry->addr_code, addr));
1080     }
1081 
1082     if (unlikely(entry->addr_code & TLB_MMIO)) {
1083         /* The region is not backed by RAM.  */
1084         if (hostp) {
1085             *hostp = NULL;
1086         }
1087         return -1;
1088     }
1089 
1090     p = (void *)((uintptr_t)addr + entry->addend);
1091     if (hostp) {
1092         *hostp = p;
1093     }
1094     return qemu_ram_addr_from_host_nofail(p);
1095 }
1096 
1097 tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
1098 {
1099     return get_page_addr_code_hostp(env, addr, NULL);
1100 }
1101 
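/* Handle a write to a page that is not yet marked dirty: invalidate any
 * TBs covering the written range, set the dirty bits, and clear the
 * TLB_NOTDIRTY slow path once the page no longer needs it. */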
1102 static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
1103                            CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
1104 {
1105     ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;
1106 
1107     trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1108 
1109     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1110         struct page_collection *pages
1111             = page_collection_lock(ram_addr, ram_addr + size);
1112         tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
1113         page_collection_unlock(pages);
1114     }
1115 
1116     /*
1117      * Set both VGA and migration bits for simplicity and to remove
1118      * the notdirty callback faster.
1119      */
1120     cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1121 
1122     /* We remove the notdirty callback only if the code has been flushed. */
1123     if (!cpu_physical_memory_is_clean(ram_addr)) {
1124         trace_memory_notdirty_set_dirty(mem_vaddr);
1125         tlb_set_dirty(cpu, mem_vaddr);
1126     }
1127 }
1128 
1129 /*
1130  * Probe for whether the specified guest access is permitted. If it is not
1131  * permitted then an exception will be taken in the same way as if this
1132  * were a real access (and we will not return).
1133  * If the size is 0 or the page requires I/O access, returns NULL; otherwise,
1134  * returns the address of the host page similar to tlb_vaddr_to_host().
1135  */
1136 void *probe_access(CPUArchState *env, target_ulong addr, int size,
1137                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1138 {
1139     uintptr_t index = tlb_index(env, mmu_idx, addr);
1140     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1141     target_ulong tlb_addr;
1142     size_t elt_ofs;
1143     int wp_access;
1144 
1145     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1146 
1147     switch (access_type) {
1148     case MMU_DATA_LOAD:
1149         elt_ofs = offsetof(CPUTLBEntry, addr_read);
1150         wp_access = BP_MEM_READ;
1151         break;
1152     case MMU_DATA_STORE:
1153         elt_ofs = offsetof(CPUTLBEntry, addr_write);
1154         wp_access = BP_MEM_WRITE;
1155         break;
1156     case MMU_INST_FETCH:
1157         elt_ofs = offsetof(CPUTLBEntry, addr_code);
1158         wp_access = BP_MEM_READ;
1159         break;
1160     default:
1161         g_assert_not_reached();
1162     }
1163     tlb_addr = tlb_read_ofs(entry, elt_ofs);
1164 
1165     if (unlikely(!tlb_hit(tlb_addr, addr))) {
1166         if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs,
1167                             addr & TARGET_PAGE_MASK)) {
1168             tlb_fill(env_cpu(env), addr, size, access_type, mmu_idx, retaddr);
1169             /* TLB resize via tlb_fill may have moved the entry. */
1170             index = tlb_index(env, mmu_idx, addr);
1171             entry = tlb_entry(env, mmu_idx, addr);
1172         }
1173         tlb_addr = tlb_read_ofs(entry, elt_ofs);
1174     }
1175 
1176     if (!size) {
1177         return NULL;
1178     }
1179 
1180     if (unlikely(tlb_addr & TLB_FLAGS_MASK)) {
1181         CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1182 
1183         /* Reject I/O access, or other required slow-path.  */
1184         if (tlb_addr & (TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) {
1185             return NULL;
1186         }
1187 
1188         /* Handle watchpoints.  */
1189         if (tlb_addr & TLB_WATCHPOINT) {
1190             cpu_check_watchpoint(env_cpu(env), addr, size,
1191                                  iotlbentry->attrs, wp_access, retaddr);
1192         }
1193 
1194         /* Handle clean RAM pages.  */
1195         if (tlb_addr & TLB_NOTDIRTY) {
1196             notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
1197         }
1198     }
1199 
1200     return (void *)((uintptr_t)addr + entry->addend);
1201 }
1202 
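/* Non-faulting lookup: return the host address backing @addr for
 * @access_type, or NULL if the address cannot be resolved without
 * faulting or requires a slow-path (I/O) access. */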
1203 void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1204                         MMUAccessType access_type, int mmu_idx)
1205 {
1206     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1207     uintptr_t tlb_addr, page;
1208     size_t elt_ofs;
1209 
1210     switch (access_type) {
1211     case MMU_DATA_LOAD:
1212         elt_ofs = offsetof(CPUTLBEntry, addr_read);
1213         break;
1214     case MMU_DATA_STORE:
1215         elt_ofs = offsetof(CPUTLBEntry, addr_write);
1216         break;
1217     case MMU_INST_FETCH:
1218         elt_ofs = offsetof(CPUTLBEntry, addr_code);
1219         break;
1220     default:
1221         g_assert_not_reached();
1222     }
1223 
1224     page = addr & TARGET_PAGE_MASK;
1225     tlb_addr = tlb_read_ofs(entry, elt_ofs);
1226 
1227     if (!tlb_hit_page(tlb_addr, page)) {
1228         uintptr_t index = tlb_index(env, mmu_idx, addr);
1229 
1230         if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
1231             CPUState *cs = env_cpu(env);
1232             CPUClass *cc = CPU_GET_CLASS(cs);
1233 
1234             if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
1235                 /* Non-faulting page table read failed.  */
1236                 return NULL;
1237             }
1238 
1239             /* TLB resize via tlb_fill may have moved the entry.  */
1240             entry = tlb_entry(env, mmu_idx, addr);
1241         }
1242         tlb_addr = tlb_read_ofs(entry, elt_ofs);
1243     }
1244 
1245     if (tlb_addr & ~TARGET_PAGE_MASK) {
1246         /* IO access */
1247         return NULL;
1248     }
1249 
1250     return (void *)((uintptr_t)addr + entry->addend);
1251 }
1252 
1253 
1254 #ifdef CONFIG_PLUGIN
1255 /*
1256  * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1257  * This should be a hot path as we will have just looked this address up
1258  * in the softmmu lookup code (or helper). We don't handle re-fills or
1259  * checking the victim table. This is purely informational.
1260  *
1261  * This should never fail as the memory access being instrumented
1262  * should have just filled the TLB.
1263  */
1264 
1265 bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
1266                        bool is_store, struct qemu_plugin_hwaddr *data)
1267 {
1268     CPUArchState *env = cpu->env_ptr;
1269     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1270     uintptr_t index = tlb_index(env, mmu_idx, addr);
1271     target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
1272 
1273     if (likely(tlb_hit(tlb_addr, addr))) {
1274         /* We must have an iotlb entry for MMIO */
1275         if (tlb_addr & TLB_MMIO) {
1276             CPUIOTLBEntry *iotlbentry;
1277             iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1278             data->is_io = true;
1279             data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
1280             data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
1281         } else {
1282             data->is_io = false;
1283             data->v.ram.hostaddr = addr + tlbe->addend;
1284         }
1285         return true;
1286     }
1287     return false;
1288 }
1289 
1290 #endif
1291 
1292 /* Probe for a read-modify-write atomic operation.  Do not allow unaligned
1293  * operations or I/O operations to proceed.  Return the host address.  */
1294 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
1295                                TCGMemOpIdx oi, uintptr_t retaddr)
1296 {
1297     size_t mmu_idx = get_mmuidx(oi);
1298     uintptr_t index = tlb_index(env, mmu_idx, addr);
1299     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1300     target_ulong tlb_addr = tlb_addr_write(tlbe);
1301     MemOp mop = get_memop(oi);
1302     int a_bits = get_alignment_bits(mop);
1303     int s_bits = mop & MO_SIZE;
1304     void *hostaddr;
1305 
1306     /* Adjust the given return address.  */
1307     retaddr -= GETPC_ADJ;
1308 
1309     /* Enforce guest required alignment.  */
1310     if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1311         /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1312         cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1313                              mmu_idx, retaddr);
1314     }
1315 
1316     /* Enforce qemu required alignment.  */
1317     if (unlikely(addr & ((1 << s_bits) - 1))) {
1318         /* We get here if guest alignment was not requested,
1319            or was not enforced by cpu_unaligned_access above.
1320            We might widen the access and emulate, but for now
1321            mark an exception and exit the cpu loop.  */
1322         goto stop_the_world;
1323     }
1324 
1325     /* Check TLB entry and enforce page permissions.  */
1326     if (!tlb_hit(tlb_addr, addr)) {
1327         if (!VICTIM_TLB_HIT(addr_write, addr)) {
1328             tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
1329                      mmu_idx, retaddr);
1330             index = tlb_index(env, mmu_idx, addr);
1331             tlbe = tlb_entry(env, mmu_idx, addr);
1332         }
1333         tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1334     }
1335 
1336     /* Notice an IO access or a needs-MMU-lookup access */
1337     if (unlikely(tlb_addr & TLB_MMIO)) {
1338         /* There's really nothing that can be done to
1339            support this apart from stop-the-world.  */
1340         goto stop_the_world;
1341     }
1342 
1343     /* Let the guest notice RMW on a write-only page.  */
1344     if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
1345         tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
1346                  mmu_idx, retaddr);
1347         /* Since we don't support reads and writes to different addresses,
1348            and we do have the proper page loaded for write, this shouldn't
1349            ever return.  But just in case, handle via stop-the-world.  */
1350         goto stop_the_world;
1351     }
1352 
1353     hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1354 
1355     if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
1356         notdirty_write(env_cpu(env), addr, 1 << s_bits,
1357                        &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
1358     }
1359 
1360     return hostaddr;
1361 
1362  stop_the_world:
1363     cpu_loop_exit_atomic(env_cpu(env), retaddr);
1364 }
1365 
1366 /*
1367  * Load Helpers
1368  *
1369  * We support two different access types. SOFTMMU_CODE_ACCESS is
1370  * specifically for reading instructions from system memory. It is
1371  * called by the translation loop and in some helpers where the code
1372  * is disassembled. It shouldn't be called directly by guest code.
1373  */
1374 
1375 typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
1376                                 TCGMemOpIdx oi, uintptr_t retaddr);
1377 
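/* Perform the host load described by @op (size and endianness) from
 * @haddr. */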
1378 static inline uint64_t QEMU_ALWAYS_INLINE
1379 load_memop(const void *haddr, MemOp op)
1380 {
1381     switch (op) {
1382     case MO_UB:
1383         return ldub_p(haddr);
1384     case MO_BEUW:
1385         return lduw_be_p(haddr);
1386     case MO_LEUW:
1387         return lduw_le_p(haddr);
1388     case MO_BEUL:
1389         return (uint32_t)ldl_be_p(haddr);
1390     case MO_LEUL:
1391         return (uint32_t)ldl_le_p(haddr);
1392     case MO_BEQ:
1393         return ldq_be_p(haddr);
1394     case MO_LEQ:
1395         return ldq_le_p(haddr);
1396     default:
1397         qemu_build_not_reached();
1398     }
1399 }
1400 
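/* Common slow-path load: refill the TLB on a miss, then handle
 * watchpoints, I/O accesses, byte-swapped pages and accesses spanning
 * two pages before falling through to a straight host load. */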
1401 static inline uint64_t QEMU_ALWAYS_INLINE
1402 load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1403             uintptr_t retaddr, MemOp op, bool code_read,
1404             FullLoadHelper *full_load)
1405 {
1406     uintptr_t mmu_idx = get_mmuidx(oi);
1407     uintptr_t index = tlb_index(env, mmu_idx, addr);
1408     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1409     target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1410     const size_t tlb_off = code_read ?
1411         offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
1412     const MMUAccessType access_type =
1413         code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
1414     unsigned a_bits = get_alignment_bits(get_memop(oi));
1415     void *haddr;
1416     uint64_t res;
1417     size_t size = memop_size(op);
1418 
1419     /* Handle CPU specific unaligned behaviour */
1420     if (addr & ((1 << a_bits) - 1)) {
1421         cpu_unaligned_access(env_cpu(env), addr, access_type,
1422                              mmu_idx, retaddr);
1423     }
1424 
1425     /* If the TLB entry is for a different page, reload and try again.  */
1426     if (!tlb_hit(tlb_addr, addr)) {
1427         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1428                             addr & TARGET_PAGE_MASK)) {
1429             tlb_fill(env_cpu(env), addr, size,
1430                      access_type, mmu_idx, retaddr);
1431             index = tlb_index(env, mmu_idx, addr);
1432             entry = tlb_entry(env, mmu_idx, addr);
1433         }
1434         tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1435         tlb_addr &= ~TLB_INVALID_MASK;
1436     }
1437 
1438     /* Handle anything that isn't just a straight memory access.  */
1439     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1440         CPUIOTLBEntry *iotlbentry;
1441         bool need_swap;
1442 
1443         /* For anything that is unaligned, recurse through full_load.  */
1444         if ((addr & (size - 1)) != 0) {
1445             goto do_unaligned_access;
1446         }
1447 
1448         iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1449 
1450         /* Handle watchpoints.  */
1451         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
1452             /* On watchpoint hit, this will longjmp out.  */
1453             cpu_check_watchpoint(env_cpu(env), addr, size,
1454                                  iotlbentry->attrs, BP_MEM_READ, retaddr);
1455         }
1456 
1457         need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
1458 
1459         /* Handle I/O access.  */
1460         if (likely(tlb_addr & TLB_MMIO)) {
1461             return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
1462                             access_type, op ^ (need_swap * MO_BSWAP));
1463         }
1464 
1465         haddr = (void *)((uintptr_t)addr + entry->addend);
1466 
1467         /*
1468          * Keep these two load_memop separate to ensure that the compiler
1469          * is able to fold the entire function to a single instruction.
1470          * There is a build-time assert inside to remind you of this.  ;-)
1471          */
1472         if (unlikely(need_swap)) {
1473             return load_memop(haddr, op ^ MO_BSWAP);
1474         }
1475         return load_memop(haddr, op);
1476     }
1477 
1478     /* Handle slow unaligned access (it spans two pages or IO).  */
1479     if (size > 1
1480         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1481                     >= TARGET_PAGE_SIZE)) {
1482         target_ulong addr1, addr2;
1483         uint64_t r1, r2;
1484         unsigned shift;
1485     do_unaligned_access:
1486         addr1 = addr & ~((target_ulong)size - 1);
1487         addr2 = addr1 + size;
1488         r1 = full_load(env, addr1, oi, retaddr);
1489         r2 = full_load(env, addr2, oi, retaddr);
1490         shift = (addr & (size - 1)) * 8;
1491 
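        /*
         * Worked example for a 4-byte little-endian load with
         * addr % 4 == 2: addr1 = addr - 2, addr2 = addr1 + 4, shift = 16.
         * r1 holds the two wanted low bytes in its top half and r2 holds
         * the two wanted high bytes in its bottom half, so
         * (r1 >> 16) | (r2 << 16), masked to 32 bits, reassembles the
         * value.  The big-endian case mirrors this with the shift
         * directions exchanged.
         */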
1492         if (memop_big_endian(op)) {
1493             /* Big-endian combine.  */
1494             res = (r1 << shift) | (r2 >> ((size * 8) - shift));
1495         } else {
1496             /* Little-endian combine.  */
1497             res = (r1 >> shift) | (r2 << ((size * 8) - shift));
1498         }
1499         return res & MAKE_64BIT_MASK(0, size * 8);
1500     }
1501 
1502     haddr = (void *)((uintptr_t)addr + entry->addend);
1503     return load_memop(haddr, op);
1504 }
1505 
1506 /*
1507  * For the benefit of TCG generated code, we want to avoid the
1508  * complication of ABI-specific return type promotion and always
1509  * return a value extended to the register size of the host. This is
1510  * tcg_target_long, except in the case of a 32-bit host and 64-bit
1511  * data, for which we always use uint64_t.
1512  *
1513  * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
1514  */
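/*
 * Concretely, helper_ret_ldub_mmu() below loads a single byte but returns
 * tcg_target_ulong, so on a 64-bit host the generated code always receives
 * the result zero-extended in a full host register and never depends on
 * what the C ABI leaves in the upper bits.
 */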
1515 
1516 static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
1517                               TCGMemOpIdx oi, uintptr_t retaddr)
1518 {
1519     return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
1520 }
1521 
1522 tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
1523                                      TCGMemOpIdx oi, uintptr_t retaddr)
1524 {
1525     return full_ldub_mmu(env, addr, oi, retaddr);
1526 }
1527 
1528 static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1529                                  TCGMemOpIdx oi, uintptr_t retaddr)
1530 {
1531     return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
1532                        full_le_lduw_mmu);
1533 }
1534 
1535 tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1536                                     TCGMemOpIdx oi, uintptr_t retaddr)
1537 {
1538     return full_le_lduw_mmu(env, addr, oi, retaddr);
1539 }
1540 
1541 static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1542                                  TCGMemOpIdx oi, uintptr_t retaddr)
1543 {
1544     return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
1545                        full_be_lduw_mmu);
1546 }
1547 
1548 tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1549                                     TCGMemOpIdx oi, uintptr_t retaddr)
1550 {
1551     return full_be_lduw_mmu(env, addr, oi, retaddr);
1552 }
1553 
1554 static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1555                                  TCGMemOpIdx oi, uintptr_t retaddr)
1556 {
1557     return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
1558                        full_le_ldul_mmu);
1559 }
1560 
1561 tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1562                                     TCGMemOpIdx oi, uintptr_t retaddr)
1563 {
1564     return full_le_ldul_mmu(env, addr, oi, retaddr);
1565 }
1566 
1567 static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1568                                  TCGMemOpIdx oi, uintptr_t retaddr)
1569 {
1570     return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
1571                        full_be_ldul_mmu);
1572 }
1573 
1574 tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1575                                     TCGMemOpIdx oi, uintptr_t retaddr)
1576 {
1577     return full_be_ldul_mmu(env, addr, oi, retaddr);
1578 }
1579 
1580 uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
1581                            TCGMemOpIdx oi, uintptr_t retaddr)
1582 {
1583     return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
1584                        helper_le_ldq_mmu);
1585 }
1586 
1587 uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
1588                            TCGMemOpIdx oi, uintptr_t retaddr)
1589 {
1590     return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
1591                        helper_be_ldq_mmu);
1592 }
1593 
1594 /*
1595  * Provide signed versions of the load routines as well.  We can of course
1596  * avoid this for 64-bit data, or for 32-bit data on a 32-bit host.
1597  */
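/*
 * Example: helper_ret_ldsb_mmu() loading the byte 0x80 casts it to int8_t
 * (-128); the implicit conversion back to tcg_target_ulong sign-extends it,
 * so a 64-bit host register ends up holding 0xffffffffffffff80.
 */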
1598 
1599 
1600 tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
1601                                      TCGMemOpIdx oi, uintptr_t retaddr)
1602 {
1603     return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
1604 }
1605 
1606 tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
1607                                     TCGMemOpIdx oi, uintptr_t retaddr)
1608 {
1609     return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
1610 }
1611 
1612 tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
1613                                     TCGMemOpIdx oi, uintptr_t retaddr)
1614 {
1615     return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
1616 }
1617 
1618 tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
1619                                     TCGMemOpIdx oi, uintptr_t retaddr)
1620 {
1621     return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
1622 }
1623 
1624 tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
1625                                     TCGMemOpIdx oi, uintptr_t retaddr)
1626 {
1627     return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
1628 }
1629 
1630 /*
1631  * Store Helpers
1632  */
1633 
1634 static inline void QEMU_ALWAYS_INLINE
1635 store_memop(void *haddr, uint64_t val, MemOp op)
1636 {
1637     switch (op) {
1638     case MO_UB:
1639         stb_p(haddr, val);
1640         break;
1641     case MO_BEUW:
1642         stw_be_p(haddr, val);
1643         break;
1644     case MO_LEUW:
1645         stw_le_p(haddr, val);
1646         break;
1647     case MO_BEUL:
1648         stl_be_p(haddr, val);
1649         break;
1650     case MO_LEUL:
1651         stl_le_p(haddr, val);
1652         break;
1653     case MO_BEQ:
1654         stq_be_p(haddr, val);
1655         break;
1656     case MO_LEQ:
1657         stq_le_p(haddr, val);
1658         break;
1659     default:
1660         qemu_build_not_reached();
1661     }
1662 }
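/*
 * As with the load side, the switch above is only correct when "op" is a
 * compile-time constant: on an optimized build the qemu_build_not_reached()
 * call in the default case is expected to be eliminated, turning an
 * unexpected MemOp into a build failure rather than a runtime abort.
 */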
1663 
1664 static inline void QEMU_ALWAYS_INLINE
1665 store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
1666              TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
1667 {
1668     uintptr_t mmu_idx = get_mmuidx(oi);
1669     uintptr_t index = tlb_index(env, mmu_idx, addr);
1670     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1671     target_ulong tlb_addr = tlb_addr_write(entry);
1672     const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
1673     unsigned a_bits = get_alignment_bits(get_memop(oi));
1674     void *haddr;
1675     size_t size = memop_size(op);
1676 
1677     /* Handle CPU-specific unaligned behaviour */
1678     if (addr & ((1 << a_bits) - 1)) {
1679         cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1680                              mmu_idx, retaddr);
1681     }
1682 
1683     /* If the TLB entry is for a different page, reload and try again.  */
1684     if (!tlb_hit(tlb_addr, addr)) {
1685         if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1686                             addr & TARGET_PAGE_MASK)) {
1687             tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
1688                      mmu_idx, retaddr);
1689             index = tlb_index(env, mmu_idx, addr);
1690             entry = tlb_entry(env, mmu_idx, addr);
1691         }
1692         tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
1693     }
1694 
1695     /* Handle anything that isn't just a straight memory access.  */
1696     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1697         CPUIOTLBEntry *iotlbentry;
1698         bool need_swap;
1699 
1700         /* For anything that is unaligned, recurse through byte stores.  */
1701         if ((addr & (size - 1)) != 0) {
1702             goto do_unaligned_access;
1703         }
1704 
1705         iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1706 
1707         /* Handle watchpoints.  */
1708         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
1709             /* On watchpoint hit, this will longjmp out.  */
1710             cpu_check_watchpoint(env_cpu(env), addr, size,
1711                                  iotlbentry->attrs, BP_MEM_WRITE, retaddr);
1712         }
1713 
1714         need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
1715 
1716         /* Handle I/O access.  */
1717         if (tlb_addr & TLB_MMIO) {
1718             io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
1719                       op ^ (need_swap * MO_BSWAP));
1720             return;
1721         }
1722 
1723         /* Ignore writes to ROM.  */
1724         if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
1725             return;
1726         }
1727 
1728         /* Handle clean RAM pages.  */
1729         if (tlb_addr & TLB_NOTDIRTY) {
1730             notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
1731         }
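        /*
         * Note that TLB_NOTDIRTY does not force MMIO: notdirty_write()
         * marks the page dirty (invalidating any TBs translated from it)
         * and execution then falls through to the plain host store below.
         */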
1732 
1733         haddr = (void *)((uintptr_t)addr + entry->addend);
1734 
1735         /*
1736          * Keep these two store_memop separate to ensure that the compiler
1737          * is able to fold the entire function to a single instruction.
1738          * There is a build-time assert inside to remind you of this.  ;-)
1739          */
1740         if (unlikely(need_swap)) {
1741             store_memop(haddr, val, op ^ MO_BSWAP);
1742         } else {
1743             store_memop(haddr, val, op);
1744         }
1745         return;
1746     }
1747 
1748     /* Handle slow unaligned access (it spans two pages or IO).  */
1749     if (size > 1
1750         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1751                      >= TARGET_PAGE_SIZE)) {
1752         int i;
1753         uintptr_t index2;
1754         CPUTLBEntry *entry2;
1755         target_ulong page2, tlb_addr2;
1756         size_t size2;
1757 
1758     do_unaligned_access:
1759         /*
1760          * Ensure the second page is in the TLB.  Note that the first page
1761          * is already guaranteed to be filled, and that the second page
1762          * cannot evict the first.
1763          */
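        /*
         * (The two pages are adjacent, so their page numbers differ by
         * one and therefore map to different tlb_index() slots in any
         * power-of-two sized table with at least two entries.)
         */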
1764         page2 = (addr + size) & TARGET_PAGE_MASK;
1765         size2 = (addr + size) & ~TARGET_PAGE_MASK;
1766         index2 = tlb_index(env, mmu_idx, page2);
1767         entry2 = tlb_entry(env, mmu_idx, page2);
1768         tlb_addr2 = tlb_addr_write(entry2);
1769         if (!tlb_hit_page(tlb_addr2, page2)) {
1770             if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
1771                 tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
1772                          mmu_idx, retaddr);
1773                 index2 = tlb_index(env, mmu_idx, page2);
1774                 entry2 = tlb_entry(env, mmu_idx, page2);
1775             }
1776             tlb_addr2 = tlb_addr_write(entry2);
1777         }
1778 
1779         /*
1780          * Handle watchpoints.  Since this may trap, all checks
1781          * must happen before any store.
1782          */
1783         if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
1784             cpu_check_watchpoint(env_cpu(env), addr, size - size2,
1785                                  env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
1786                                  BP_MEM_WRITE, retaddr);
1787         }
1788         if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
1789             cpu_check_watchpoint(env_cpu(env), page2, size2,
1790                                  env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
1791                                  BP_MEM_WRITE, retaddr);
1792         }
1793 
1794         /*
1795          * XXX: not efficient, but simple.
1796          * This loop must go in the forward direction to avoid issues
1797          * with self-modifying code on 64-bit Windows guests.
1798          */
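        /*
         * Example for a 4-byte big-endian store: i == 0 writes val >> 24
         * (the most significant byte) to the lowest address, down to
         * i == 3 writing the low byte; the little-endian branch simply
         * stores val >> (i * 8) instead.
         */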
1799         for (i = 0; i < size; ++i) {
1800             uint8_t val8;
1801             if (memop_big_endian(op)) {
1802                 /* Big-endian extract.  */
1803                 val8 = val >> (((size - 1) * 8) - (i * 8));
1804             } else {
1805                 /* Little-endian extract.  */
1806                 val8 = val >> (i * 8);
1807             }
1808             helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
1809         }
1810         return;
1811     }
1812 
1813     haddr = (void *)((uintptr_t)addr + entry->addend);
1814     store_memop(haddr, val, op);
1815 }
1816 
1817 void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
1818                         TCGMemOpIdx oi, uintptr_t retaddr)
1819 {
1820     store_helper(env, addr, val, oi, retaddr, MO_UB);
1821 }
1822 
1823 void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1824                        TCGMemOpIdx oi, uintptr_t retaddr)
1825 {
1826     store_helper(env, addr, val, oi, retaddr, MO_LEUW);
1827 }
1828 
1829 void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1830                        TCGMemOpIdx oi, uintptr_t retaddr)
1831 {
1832     store_helper(env, addr, val, oi, retaddr, MO_BEUW);
1833 }
1834 
1835 void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1836                        TCGMemOpIdx oi, uintptr_t retaddr)
1837 {
1838     store_helper(env, addr, val, oi, retaddr, MO_LEUL);
1839 }
1840 
1841 void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1842                        TCGMemOpIdx oi, uintptr_t retaddr)
1843 {
1844     store_helper(env, addr, val, oi, retaddr, MO_BEUL);
1845 }
1846 
1847 void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1848                        TCGMemOpIdx oi, uintptr_t retaddr)
1849 {
1850     store_helper(env, addr, val, oi, retaddr, MO_LEQ);
1851 }
1852 
1853 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1854                        TCGMemOpIdx oi, uintptr_t retaddr)
1855 {
1856     store_helper(env, addr, val, oi, retaddr, MO_BEQ);
1857 }
1858 
1859 /* The first set of helpers allows OI and RETADDR to be passed in explicitly,
1860    which makes them callable from other helpers.  */
1861 
1862 #define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
1863 #define ATOMIC_NAME(X) \
1864     HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
1865 #define ATOMIC_MMU_DECLS
1866 #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
1867 #define ATOMIC_MMU_CLEANUP
1868 #define ATOMIC_MMU_IDX   get_mmuidx(oi)
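/*
 * Each inclusion of atomic_template.h below expands these macros for one
 * DATA_SIZE, generating helpers whose names are built by ATOMIC_NAME --
 * e.g. something of the form helper_atomic_cmpxchgl_le_mmu for a 4-byte
 * little-endian compare-and-swap (name shown for illustration only).
 */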
1869 
1870 #include "atomic_common.inc.c"
1871 
1872 #define DATA_SIZE 1
1873 #include "atomic_template.h"
1874 
1875 #define DATA_SIZE 2
1876 #include "atomic_template.h"
1877 
1878 #define DATA_SIZE 4
1879 #include "atomic_template.h"
1880 
1881 #ifdef CONFIG_ATOMIC64
1882 #define DATA_SIZE 8
1883 #include "atomic_template.h"
1884 #endif
1885 
1886 #if HAVE_CMPXCHG128 || HAVE_ATOMIC128
1887 #define DATA_SIZE 16
1888 #include "atomic_template.h"
1889 #endif
1890 
1891 /* The second set of helpers is directly callable from TCG-generated code.  */
1892 
1893 #undef EXTRA_ARGS
1894 #undef ATOMIC_NAME
1895 #undef ATOMIC_MMU_LOOKUP
1896 #define EXTRA_ARGS         , TCGMemOpIdx oi
1897 #define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
1898 #define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC())
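/*
 * The only difference from the first expansion is that these helpers are
 * invoked directly by TCG-generated code, so the host return address is
 * recovered with GETPC() instead of arriving as an explicit retaddr
 * argument.
 */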
1899 
1900 #define DATA_SIZE 1
1901 #include "atomic_template.h"
1902 
1903 #define DATA_SIZE 2
1904 #include "atomic_template.h"
1905 
1906 #define DATA_SIZE 4
1907 #include "atomic_template.h"
1908 
1909 #ifdef CONFIG_ATOMIC64
1910 #define DATA_SIZE 8
1911 #include "atomic_template.h"
1912 #endif
1913 #undef ATOMIC_MMU_IDX
1914 
1915 /* Code access functions.  */
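/*
 * These mirror the data-load helpers but pass code_read == true to
 * load_helper(), so the lookup uses addr_code and faults are reported as
 * MMU_INST_FETCH; the helpers also return their natural width (uint8_t,
 * uint16_t, ...) rather than the widened tcg_target_ulong used for data
 * loads.
 */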
1916 
1917 static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
1918                                TCGMemOpIdx oi, uintptr_t retaddr)
1919 {
1920     return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu);
1921 }
1922 
1923 uint8_t helper_ret_ldub_cmmu(CPUArchState *env, target_ulong addr,
1924                             TCGMemOpIdx oi, uintptr_t retaddr)
1925 {
1926     return full_ldub_cmmu(env, addr, oi, retaddr);
1927 }
1928 
1929 int8_t helper_ret_ldsb_cmmu(CPUArchState *env, target_ulong addr,
1930                             TCGMemOpIdx oi, uintptr_t retaddr)
1931 {
1932     return (int8_t) full_ldub_cmmu(env, addr, oi, retaddr);
1933 }
1934 
1935 static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
1936                                   TCGMemOpIdx oi, uintptr_t retaddr)
1937 {
1938     return load_helper(env, addr, oi, retaddr, MO_LEUW, true,
1939                        full_le_lduw_cmmu);
1940 }
1941 
1942 uint16_t helper_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
1943                             TCGMemOpIdx oi, uintptr_t retaddr)
1944 {
1945     return full_le_lduw_cmmu(env, addr, oi, retaddr);
1946 }
1947 
1948 int16_t helper_le_ldsw_cmmu(CPUArchState *env, target_ulong addr,
1949                             TCGMemOpIdx oi, uintptr_t retaddr)
1950 {
1951     return (int16_t) full_le_lduw_cmmu(env, addr, oi, retaddr);
1952 }
1953 
1954 static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
1955                                   TCGMemOpIdx oi, uintptr_t retaddr)
1956 {
1957     return load_helper(env, addr, oi, retaddr, MO_BEUW, true,
1958                        full_be_lduw_cmmu);
1959 }
1960 
1961 uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
1962                             TCGMemOpIdx oi, uintptr_t retaddr)
1963 {
1964     return full_be_lduw_cmmu(env, addr, oi, retaddr);
1965 }
1966 
1967 int16_t helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr,
1968                             TCGMemOpIdx oi, uintptr_t retaddr)
1969 {
1970     return (int16_t) full_be_lduw_cmmu(env, addr, oi, retaddr);
1971 }
1972 
1973 static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
1974                                   TCGMemOpIdx oi, uintptr_t retaddr)
1975 {
1976     return load_helper(env, addr, oi, retaddr, MO_LEUL, true,
1977                        full_le_ldul_cmmu);
1978 }
1979 
1980 uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
1981                             TCGMemOpIdx oi, uintptr_t retaddr)
1982 {
1983     return full_le_ldul_cmmu(env, addr, oi, retaddr);
1984 }
1985 
1986 static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
1987                                   TCGMemOpIdx oi, uintptr_t retaddr)
1988 {
1989     return load_helper(env, addr, oi, retaddr, MO_BEUL, true,
1990                        full_be_ldul_cmmu);
1991 }
1992 
1993 uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
1994                             TCGMemOpIdx oi, uintptr_t retaddr)
1995 {
1996     return full_be_ldul_cmmu(env, addr, oi, retaddr);
1997 }
1998 
1999 uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
2000                             TCGMemOpIdx oi, uintptr_t retaddr)
2001 {
2002     return load_helper(env, addr, oi, retaddr, MO_LEQ, true,
2003                        helper_le_ldq_cmmu);
2004 }
2005 
2006 uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
2007                             TCGMemOpIdx oi, uintptr_t retaddr)
2008 {
2009     return load_helper(env, addr, oi, retaddr, MO_BEQ, true,
2010                        helper_be_ldq_cmmu);
2011 }
2012