/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto-common.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace.h"
#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"
#include "tcg/oversized-guest.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

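/*
 * Illustrative only: with DEBUG_TLB and DEBUG_TLB_LOG enabled above, a call
 * such as
 *     tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);
 * is routed to qemu_log_mask(CPU_LOG_MMU, ...); with only DEBUG_TLB enabled
 * it falls back to fprintf(stderr, ...), and with neither gate set both
 * branches are dead and compile away.
 */
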
#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * vaddr even on 32 bit builds
 */
QEMU_BUILD_BUG_ON(sizeof(vaddr) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;
    int i, i0;

    if (unlikely(!jc)) {
        return;
    }

    i0 = tb_jmp_cache_hash_page(page_addr);
    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&jc->array[i0 + i].tb, NULL);
    }
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock_held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->fulltlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->fulltlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->fulltlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
    }
}

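/*
 * Worked example of the sizing heuristic above (illustrative numbers only):
 * with old_size == 1024 and window_max_entries == 800, rate == 78 and the
 * table doubles (capped by CPU_TLB_DYN_MAX_BITS).  With window_max_entries
 * == 200 and an expired window, rate == 19; pow2ceil(200) == 256 would give
 * an expected rate of 78 > 70, so ceil is doubled to 512 and the table
 * shrinks to MAX(512, 1 << CPU_TLB_DYN_MIN_BITS) entries.
 */
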
static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
    CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUState *cpu, uintptr_t mmu_idx)
{
    cpu->neg.tlb.d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUState *cpu, uintptr_t mmu_idx)
{
    cpu->neg.tlb.d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&cpu->neg.tlb.c.lock);

    /* All tlbs are initialized flushed. */
    cpu->neg.tlb.c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    int i;

    qemu_spin_destroy(&cpu->neg.tlb.c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &cpu->neg.tlb.d[i];
        CPUTLBDescFast *fast = &cpu->neg.tlb.f[i];

        g_free(fast->table);
        g_free(desc->fulltlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        full += qatomic_read(&cpu->neg.tlb.c.full_flush_count);
        part += qatomic_read(&cpu->neg.tlb.c.part_flush_count);
        elide += qatomic_read(&cpu->neg.tlb.c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

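/*
 * Illustrative only: in tlb_flush_by_mmuidx_async_work() below, if
 * cpu->neg.tlb.c.dirty == 0b0111 and the caller asked for idxmap == 0b0110,
 * then to_clean == 0b0110, only those two MMU indexes are actually flushed,
 * and dirty is left at 0b0001.  The loop "work &= work - 1" visits each set
 * bit (via ctz32) exactly once, and a bit that was asked for but not dirty
 * is accounted as an elided flush.
 */
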
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&cpu->neg.tlb.c.lock);

    all_dirty = cpu->neg.tlb.c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    cpu->neg.tlb.c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(cpu, mmu_idx, now);
    }

    qemu_spin_unlock(&cpu->neg.tlb.c.lock);

    tcg_flush_jmp_cache(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&cpu->neg.tlb.c.full_flush_count,
                    cpu->neg.tlb.c.full_flush_count + 1);
    } else {
        qatomic_set(&cpu->neg.tlb.c.part_flush_count,
                    cpu->neg.tlb.c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&cpu->neg.tlb.c.elide_flush_count,
                        cpu->neg.tlb.c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      vaddr page, vaddr mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

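/*
 * Illustrative only: an entry that was installed without write permission
 * has addr_write == -1, yet tlb_hit_page_anyprot() above still reports a
 * hit for its page because addr_read (or addr_code) compares equal; a match
 * on any one of the three comparators is enough.
 */
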
/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        vaddr page,
                                        vaddr mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUState *cpu, int mmu_idx,
                                            vaddr page,
                                            vaddr mask)
{
    CPUTLBDesc *d = &cpu->neg.tlb.d[mmu_idx];
    int k;

    assert_cpu_is_self(cpu);
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(cpu, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUState *cpu, int mmu_idx,
                                              vaddr page)
{
    tlb_flush_vtlb_page_mask_locked(cpu, mmu_idx, page, -1);
}

static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
{
    vaddr lp_addr = cpu->neg.tlb.d[midx].large_page_addr;
    vaddr lp_mask = cpu->neg.tlb.d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d (%016"
                  VADDR_PRIx "/%016" VADDR_PRIx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(cpu, midx, page), page)) {
            tlb_n_used_entries_dec(cpu, midx);
        }
        tlb_flush_vtlb_page_locked(cpu, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             vaddr addr,
                                             uint16_t idxmap)
{
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&cpu->neg.tlb.c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(cpu, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&cpu->neg.tlb.c.lock);

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed page, which includes the previous.
     */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    vaddr addr_and_idxmap = data.target_ptr;
    vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

typedef struct {
    vaddr addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

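/*
 * Illustrative only: when idxmap fits in the low TARGET_PAGE_BITS, the
 * callers below pack both values into one pointer-sized word,
 *     RUN_ON_CPU_TARGET_PTR((addr & TARGET_PAGE_MASK) | idxmap)
 * and tlb_flush_page_by_mmuidx_async_1() above recovers them with
 *     addr   = data.target_ptr & TARGET_PAGE_MASK;
 *     idxmap = data.target_ptr & ~TARGET_PAGE_MASK;
 */
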
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
{
    tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page(CPUState *cpu, vaddr addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              vaddr addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

static void tlb_flush_range_locked(CPUState *cpu, int midx,
                                   vaddr addr, vaddr len,
                                   unsigned bits)
{
    CPUTLBDesc *d = &cpu->neg.tlb.d[midx];
    CPUTLBDescFast *f = &cpu->neg.tlb.f[midx];
    vaddr mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
    if (mask < f->mask || len > f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  "%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n",
                  midx, addr, mask, len);
        tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
        return;
    }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  "%016" VADDR_PRIx "/%016" VADDR_PRIx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
        return;
    }

    for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
        vaddr page = addr + i;
        CPUTLBEntry *entry = tlb_entry(cpu, midx, page);

        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
            tlb_n_used_entries_dec(cpu, midx);
        }
        tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask);
    }
}

typedef struct {
    vaddr addr;
    vaddr len;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushRangeData;

static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
                                              TLBFlushRangeData d)
{
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("range: %016" VADDR_PRIx "/%u+%016" VADDR_PRIx " mmu_map:0x%x\n",
              d.addr, d.bits, d.len, d.idxmap);

    qemu_spin_lock(&cpu->neg.tlb.c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_range_locked(cpu, mmu_idx, d.addr, d.len, d.bits);
        }
    }
    qemu_spin_unlock(&cpu->neg.tlb.c.lock);

    /*
     * If the length is larger than the jump cache size, then it will take
     * longer to clear each entry individually than it will to clear it all.
     */
    if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
        tcg_flush_jmp_cache(cpu);
        return;
    }

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed pages, which includes the previous.
     */
    d.addr -= TARGET_PAGE_SIZE;
    for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
        tb_jmp_cache_clear_page(cpu, d.addr);
        d.addr += TARGET_PAGE_SIZE;
    }
}

static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
                                              run_on_cpu_data data)
{
    TLBFlushRangeData *d = data.host_ptr;
    tlb_flush_range_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}

void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                               vaddr len, uint16_t idxmap,
                               unsigned bits)
{
    TLBFlushRangeData d;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_range_by_mmuidx_async_0(cpu, d);
    } else {
        /* Otherwise allocate a structure, freed by the worker.  */
        TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
        async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
                         RUN_ON_CPU_HOST_PTR(p));
    }
}

void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
                                   uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}

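/*
 * Illustrative only (hypothetical target code): a target invalidating a
 * 64KiB region on the current vCPU, with only the low 48 address bits
 * significant, could call
 *     tlb_flush_range_by_mmuidx(cs, va, 0x10000, idxmap, 48);
 * where "cs", "va" and "idxmap" stand for the target's CPUState, start
 * address and mmu_idx bitmask.
 */
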
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
                                        vaddr addr, vaddr len,
                                        uint16_t idxmap, unsigned bits)
{
    TLBFlushRangeData d;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu,
                             tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
}

void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                            vaddr addr, uint16_t idxmap,
                                            unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
                                       idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               vaddr addr,
                                               vaddr len,
                                               uint16_t idxmap,
                                               unsigned bits)
{
    TLBFlushRangeData d, *p;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    p = g_memdup(&d, sizeof(d));
    async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
                          RUN_ON_CPU_HOST_PTR(p));
}

void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   vaddr addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
                                              idxmap, bits);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
                                             TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with qatomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TARGET_LONG_BITS == 32
            uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
            ptr_write += HOST_BIG_ENDIAN;
            qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
#elif TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            qatomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    int mmu_idx;

    qemu_spin_lock(&cpu->neg.tlb.c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&cpu->neg.tlb.c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         vaddr addr)
{
    if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = addr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, vaddr addr)
{
    int mmu_idx;

    assert_cpu_is_self(cpu);

    addr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&cpu->neg.tlb.c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(cpu, mmu_idx, addr), addr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&cpu->neg.tlb.d[mmu_idx].vtable[k], addr);
        }
    }
    qemu_spin_unlock(&cpu->neg.tlb.c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *cpu, int mmu_idx,
                               vaddr addr, uint64_t size)
{
    vaddr lp_addr = cpu->neg.tlb.d[mmu_idx].large_page_addr;
    vaddr lp_mask = ~(size - 1);

    if (lp_addr == (vaddr)-1) {
        /* No previous large page.  */
        lp_addr = addr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.  */
        lp_mask &= cpu->neg.tlb.d[mmu_idx].large_page_mask;
        while (((lp_addr ^ addr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    cpu->neg.tlb.d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    cpu->neg.tlb.d[mmu_idx].large_page_mask = lp_mask;
}

static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
                                   vaddr address, int flags,
                                   MMUAccessType access_type, bool enable)
{
    if (enable) {
        address |= flags & TLB_FLAGS_MASK;
        flags &= TLB_SLOW_FLAGS_MASK;
        if (flags) {
            address |= TLB_FORCE_SLOW;
        }
    } else {
        address = -1;
        flags = 0;
    }
    ent->addr_idx[access_type] = address;
    full->slow_flags[access_type] = flags;
}

/*
 * Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
1125d9bb58e5SYang Zhong */ 112640473689SRichard Henderson void tlb_set_page_full(CPUState *cpu, int mmu_idx, 1127732d5487SAnton Johansson vaddr addr, CPUTLBEntryFull *full) 1128d9bb58e5SYang Zhong { 112910b32e2cSAnton Johansson CPUTLB *tlb = &cpu->neg.tlb; 1130a40ec84eSRichard Henderson CPUTLBDesc *desc = &tlb->d[mmu_idx]; 1131d9bb58e5SYang Zhong MemoryRegionSection *section; 113258e8f1f6SRichard Henderson unsigned int index, read_flags, write_flags; 1133d9bb58e5SYang Zhong uintptr_t addend; 113468fea038SRichard Henderson CPUTLBEntry *te, tn; 113555df6fcfSPeter Maydell hwaddr iotlb, xlat, sz, paddr_page; 1136732d5487SAnton Johansson vaddr addr_page; 113740473689SRichard Henderson int asidx, wp_flags, prot; 11388f5db641SRichard Henderson bool is_ram, is_romd; 1139d9bb58e5SYang Zhong 1140d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 114155df6fcfSPeter Maydell 114240473689SRichard Henderson if (full->lg_page_size <= TARGET_PAGE_BITS) { 114355df6fcfSPeter Maydell sz = TARGET_PAGE_SIZE; 114455df6fcfSPeter Maydell } else { 114540473689SRichard Henderson sz = (hwaddr)1 << full->lg_page_size; 114610b32e2cSAnton Johansson tlb_add_large_page(cpu, mmu_idx, addr, sz); 114755df6fcfSPeter Maydell } 1148732d5487SAnton Johansson addr_page = addr & TARGET_PAGE_MASK; 114940473689SRichard Henderson paddr_page = full->phys_addr & TARGET_PAGE_MASK; 115055df6fcfSPeter Maydell 115140473689SRichard Henderson prot = full->prot; 115240473689SRichard Henderson asidx = cpu_asidx_from_attrs(cpu, full->attrs); 115355df6fcfSPeter Maydell section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, 115440473689SRichard Henderson &xlat, &sz, full->attrs, &prot); 1155d9bb58e5SYang Zhong assert(sz >= TARGET_PAGE_SIZE); 1156d9bb58e5SYang Zhong 11578c605cf1SAnton Johansson tlb_debug("vaddr=%016" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx 1158d9bb58e5SYang Zhong " prot=%x idx=%d\n", 1159732d5487SAnton Johansson addr, full->phys_addr, prot, mmu_idx); 1160d9bb58e5SYang Zhong 116158e8f1f6SRichard Henderson read_flags = 0; 116240473689SRichard Henderson if (full->lg_page_size < TARGET_PAGE_BITS) { 116330d7e098SRichard Henderson /* Repeat the MMU check and TLB fill on every access. */ 116458e8f1f6SRichard Henderson read_flags |= TLB_INVALID_MASK; 116555df6fcfSPeter Maydell } 116640473689SRichard Henderson if (full->attrs.byte_swap) { 116758e8f1f6SRichard Henderson read_flags |= TLB_BSWAP; 1168a26fc6f5STony Nguyen } 11698f5db641SRichard Henderson 11708f5db641SRichard Henderson is_ram = memory_region_is_ram(section->mr); 11718f5db641SRichard Henderson is_romd = memory_region_is_romd(section->mr); 11728f5db641SRichard Henderson 11738f5db641SRichard Henderson if (is_ram || is_romd) { 11748f5db641SRichard Henderson /* RAM and ROMD both have associated host memory. */ 1175d9bb58e5SYang Zhong addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; 11768f5db641SRichard Henderson } else { 11778f5db641SRichard Henderson /* I/O does not; force the host address to NULL. */ 11788f5db641SRichard Henderson addend = 0; 1179d9bb58e5SYang Zhong } 1180d9bb58e5SYang Zhong 118158e8f1f6SRichard Henderson write_flags = read_flags; 11828f5db641SRichard Henderson if (is_ram) { 11838f5db641SRichard Henderson iotlb = memory_region_get_ram_addr(section->mr) + xlat; 1184dff1ab68SLIU Zhiwei assert(!(iotlb & ~TARGET_PAGE_MASK)); 11858f5db641SRichard Henderson /* 11868f5db641SRichard Henderson * Computing is_clean is expensive; avoid all that unless 11878f5db641SRichard Henderson * the page is actually writable. 
11888f5db641SRichard Henderson */ 11898f5db641SRichard Henderson if (prot & PAGE_WRITE) { 11908f5db641SRichard Henderson if (section->readonly) { 119158e8f1f6SRichard Henderson write_flags |= TLB_DISCARD_WRITE; 11928f5db641SRichard Henderson } else if (cpu_physical_memory_is_clean(iotlb)) { 119358e8f1f6SRichard Henderson write_flags |= TLB_NOTDIRTY; 11948f5db641SRichard Henderson } 11958f5db641SRichard Henderson } 11968f5db641SRichard Henderson } else { 11978f5db641SRichard Henderson /* I/O or ROMD */ 11988f5db641SRichard Henderson iotlb = memory_region_section_get_iotlb(cpu, section) + xlat; 11998f5db641SRichard Henderson /* 12008f5db641SRichard Henderson * Writes to romd devices must go through MMIO to enable write. 12018f5db641SRichard Henderson * Reads to romd devices go through the ram_ptr found above, 12028f5db641SRichard Henderson * but of course reads to I/O must go through MMIO. 12038f5db641SRichard Henderson */ 120458e8f1f6SRichard Henderson write_flags |= TLB_MMIO; 12058f5db641SRichard Henderson if (!is_romd) { 120658e8f1f6SRichard Henderson read_flags = write_flags; 12078f5db641SRichard Henderson } 12088f5db641SRichard Henderson } 12098f5db641SRichard Henderson 1210732d5487SAnton Johansson wp_flags = cpu_watchpoint_address_matches(cpu, addr_page, 121150b107c5SRichard Henderson TARGET_PAGE_SIZE); 1212d9bb58e5SYang Zhong 121310b32e2cSAnton Johansson index = tlb_index(cpu, mmu_idx, addr_page); 121410b32e2cSAnton Johansson te = tlb_entry(cpu, mmu_idx, addr_page); 1215d9bb58e5SYang Zhong 121668fea038SRichard Henderson /* 121771aec354SEmilio G. Cota * Hold the TLB lock for the rest of the function. We could acquire/release 121871aec354SEmilio G. Cota * the lock several times in the function, but it is faster to amortize the 121971aec354SEmilio G. Cota * acquisition cost by acquiring it just once. Note that this leads to 122071aec354SEmilio G. Cota * a longer critical section, but this is not a concern since the TLB lock 122171aec354SEmilio G. Cota * is unlikely to be contended. 122271aec354SEmilio G. Cota */ 1223a40ec84eSRichard Henderson qemu_spin_lock(&tlb->c.lock); 122471aec354SEmilio G. Cota 12253d1523ceSRichard Henderson /* Note that the tlb is no longer clean. */ 1226a40ec84eSRichard Henderson tlb->c.dirty |= 1 << mmu_idx; 12273d1523ceSRichard Henderson 122871aec354SEmilio G. Cota /* Make sure there's no cached translation for the new page. */ 122910b32e2cSAnton Johansson tlb_flush_vtlb_page_locked(cpu, mmu_idx, addr_page); 123071aec354SEmilio G. Cota 123171aec354SEmilio G. Cota /* 123268fea038SRichard Henderson * Only evict the old entry to the victim tlb if it's for a 123368fea038SRichard Henderson * different page; otherwise just overwrite the stale data. 123468fea038SRichard Henderson */ 1235732d5487SAnton Johansson if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) { 1236a40ec84eSRichard Henderson unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; 1237a40ec84eSRichard Henderson CPUTLBEntry *tv = &desc->vtable[vidx]; 123868fea038SRichard Henderson 123968fea038SRichard Henderson /* Evict the old entry into the victim tlb. */ 124071aec354SEmilio G. 
Cota copy_tlb_helper_locked(tv, te); 124125d3ec58SRichard Henderson desc->vfulltlb[vidx] = desc->fulltlb[index]; 124210b32e2cSAnton Johansson tlb_n_used_entries_dec(cpu, mmu_idx); 124368fea038SRichard Henderson } 1244d9bb58e5SYang Zhong 1245d9bb58e5SYang Zhong /* refill the tlb */ 1246ace41090SPeter Maydell /* 1247dff1ab68SLIU Zhiwei * When memory region is ram, iotlb contains a TARGET_PAGE_BITS 1248dff1ab68SLIU Zhiwei * aligned ram_addr_t of the page base of the target RAM. 1249dff1ab68SLIU Zhiwei * Otherwise, iotlb contains 1250dff1ab68SLIU Zhiwei * - a physical section number in the lower TARGET_PAGE_BITS 1251dff1ab68SLIU Zhiwei * - the offset within section->mr of the page base (I/O, ROMD) with the 1252dff1ab68SLIU Zhiwei * TARGET_PAGE_BITS masked off. 125358e8f1f6SRichard Henderson * We subtract addr_page (which is page aligned and thus won't 1254ace41090SPeter Maydell * disturb the low bits) to give an offset which can be added to the 1255ace41090SPeter Maydell * (non-page-aligned) vaddr of the eventual memory access to get 1256ace41090SPeter Maydell * the MemoryRegion offset for the access. Note that the vaddr we 1257ace41090SPeter Maydell * subtract here is that of the page base, and not the same as the 1258fb3cb376SRichard Henderson * vaddr we add back in io_prepare()/get_page_addr_code(). 1259ace41090SPeter Maydell */ 126040473689SRichard Henderson desc->fulltlb[index] = *full; 126158e8f1f6SRichard Henderson full = &desc->fulltlb[index]; 126258e8f1f6SRichard Henderson full->xlat_section = iotlb - addr_page; 126358e8f1f6SRichard Henderson full->phys_addr = paddr_page; 1264d9bb58e5SYang Zhong 1265d9bb58e5SYang Zhong /* Now calculate the new entry */ 1266732d5487SAnton Johansson tn.addend = addend - addr_page; 126758e8f1f6SRichard Henderson 126858e8f1f6SRichard Henderson tlb_set_compare(full, &tn, addr_page, read_flags, 126958e8f1f6SRichard Henderson MMU_INST_FETCH, prot & PAGE_EXEC); 127058e8f1f6SRichard Henderson 127150b107c5SRichard Henderson if (wp_flags & BP_MEM_READ) { 127258e8f1f6SRichard Henderson read_flags |= TLB_WATCHPOINT; 127350b107c5SRichard Henderson } 127458e8f1f6SRichard Henderson tlb_set_compare(full, &tn, addr_page, read_flags, 127558e8f1f6SRichard Henderson MMU_DATA_LOAD, prot & PAGE_READ); 1276d9bb58e5SYang Zhong 1277f52bfb12SDavid Hildenbrand if (prot & PAGE_WRITE_INV) { 127858e8f1f6SRichard Henderson write_flags |= TLB_INVALID_MASK; 1279f52bfb12SDavid Hildenbrand } 128050b107c5SRichard Henderson if (wp_flags & BP_MEM_WRITE) { 128158e8f1f6SRichard Henderson write_flags |= TLB_WATCHPOINT; 128250b107c5SRichard Henderson } 128358e8f1f6SRichard Henderson tlb_set_compare(full, &tn, addr_page, write_flags, 128458e8f1f6SRichard Henderson MMU_DATA_STORE, prot & PAGE_WRITE); 1285d9bb58e5SYang Zhong 128671aec354SEmilio G. 
Cota copy_tlb_helper_locked(te, &tn); 128710b32e2cSAnton Johansson tlb_n_used_entries_inc(cpu, mmu_idx); 1288a40ec84eSRichard Henderson qemu_spin_unlock(&tlb->c.lock); 1289d9bb58e5SYang Zhong } 1290d9bb58e5SYang Zhong 1291732d5487SAnton Johansson void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr, 129240473689SRichard Henderson hwaddr paddr, MemTxAttrs attrs, int prot, 1293732d5487SAnton Johansson int mmu_idx, uint64_t size) 129440473689SRichard Henderson { 129540473689SRichard Henderson CPUTLBEntryFull full = { 129640473689SRichard Henderson .phys_addr = paddr, 129740473689SRichard Henderson .attrs = attrs, 129840473689SRichard Henderson .prot = prot, 129940473689SRichard Henderson .lg_page_size = ctz64(size) 130040473689SRichard Henderson }; 130140473689SRichard Henderson 130240473689SRichard Henderson assert(is_power_of_2(size)); 1303732d5487SAnton Johansson tlb_set_page_full(cpu, mmu_idx, addr, &full); 130440473689SRichard Henderson } 130540473689SRichard Henderson 1306732d5487SAnton Johansson void tlb_set_page(CPUState *cpu, vaddr addr, 1307d9bb58e5SYang Zhong hwaddr paddr, int prot, 1308732d5487SAnton Johansson int mmu_idx, uint64_t size) 1309d9bb58e5SYang Zhong { 1310732d5487SAnton Johansson tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED, 1311d9bb58e5SYang Zhong prot, mmu_idx, size); 1312d9bb58e5SYang Zhong } 1313d9bb58e5SYang Zhong 1314c319dc13SRichard Henderson /* 1315c319dc13SRichard Henderson * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the 1316c319dc13SRichard Henderson * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must 1317c319dc13SRichard Henderson * be discarded and looked up again (e.g. via tlb_entry()). 1318c319dc13SRichard Henderson */ 1319732d5487SAnton Johansson static void tlb_fill(CPUState *cpu, vaddr addr, int size, 1320c319dc13SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1321c319dc13SRichard Henderson { 1322c319dc13SRichard Henderson bool ok; 1323c319dc13SRichard Henderson 1324c319dc13SRichard Henderson /* 1325c319dc13SRichard Henderson * This is not a probe, so only valid return is success; failure 1326c319dc13SRichard Henderson * should result in exception + longjmp to the cpu loop. 
1327c319dc13SRichard Henderson */ 13288810ee2aSAlex Bennée ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size, 1329e124536fSEduardo Habkost access_type, mmu_idx, false, retaddr); 1330c319dc13SRichard Henderson assert(ok); 1331c319dc13SRichard Henderson } 1332c319dc13SRichard Henderson 133378271684SClaudio Fontana static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, 133478271684SClaudio Fontana MMUAccessType access_type, 133578271684SClaudio Fontana int mmu_idx, uintptr_t retaddr) 133678271684SClaudio Fontana { 13378810ee2aSAlex Bennée cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, 13388810ee2aSAlex Bennée mmu_idx, retaddr); 133978271684SClaudio Fontana } 134078271684SClaudio Fontana 1341fb3cb376SRichard Henderson static MemoryRegionSection * 1342d50ef446SAnton Johansson io_prepare(hwaddr *out_offset, CPUState *cpu, hwaddr xlat, 1343fb3cb376SRichard Henderson MemTxAttrs attrs, vaddr addr, uintptr_t retaddr) 1344d9bb58e5SYang Zhong { 13452d54f194SPeter Maydell MemoryRegionSection *section; 1346fb3cb376SRichard Henderson hwaddr mr_offset; 1347d9bb58e5SYang Zhong 1348fb3cb376SRichard Henderson section = iotlb_to_section(cpu, xlat, attrs); 1349fb3cb376SRichard Henderson mr_offset = (xlat & TARGET_PAGE_MASK) + addr; 1350d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 1351464dacf6SRichard Henderson if (!cpu->neg.can_do_io) { 1352d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 1353d9bb58e5SYang Zhong } 1354d9bb58e5SYang Zhong 1355fb3cb376SRichard Henderson *out_offset = mr_offset; 1356fb3cb376SRichard Henderson return section; 1357fb3cb376SRichard Henderson } 1358fb3cb376SRichard Henderson 1359d50ef446SAnton Johansson static void io_failed(CPUState *cpu, CPUTLBEntryFull *full, vaddr addr, 1360fb3cb376SRichard Henderson unsigned size, MMUAccessType access_type, int mmu_idx, 13610e114440SRichard Henderson MemTxResult response, uintptr_t retaddr) 1362fb3cb376SRichard Henderson { 1363d50ef446SAnton Johansson if (!cpu->ignore_memory_transaction_failures 1364d50ef446SAnton Johansson && cpu->cc->tcg_ops->do_transaction_failed) { 13650e114440SRichard Henderson hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK); 1366bef0c216SRichard Henderson 1367d50ef446SAnton Johansson cpu->cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size, 1368bef0c216SRichard Henderson access_type, mmu_idx, 1369bef0c216SRichard Henderson full->attrs, response, retaddr); 1370bef0c216SRichard Henderson } 1371bef0c216SRichard Henderson } 1372fb3cb376SRichard Henderson 1373d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied 1374d9bb58e5SYang Zhong back to the main tlb. */ 137510b32e2cSAnton Johansson static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index, 1376732d5487SAnton Johansson MMUAccessType access_type, vaddr page) 1377d9bb58e5SYang Zhong { 1378d9bb58e5SYang Zhong size_t vidx; 137971aec354SEmilio G. Cota 138010b32e2cSAnton Johansson assert_cpu_is_self(cpu); 1381d9bb58e5SYang Zhong for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { 138210b32e2cSAnton Johansson CPUTLBEntry *vtlb = &cpu->neg.tlb.d[mmu_idx].vtable[vidx]; 13839e39de98SAnton Johansson uint64_t cmp = tlb_read_idx(vtlb, access_type); 1384d9bb58e5SYang Zhong 1385d9bb58e5SYang Zhong if (cmp == page) { 1386d9bb58e5SYang Zhong /* Found entry in victim tlb, swap tlb and iotlb. 
*/ 138710b32e2cSAnton Johansson CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index]; 1388d9bb58e5SYang Zhong 138910b32e2cSAnton Johansson qemu_spin_lock(&cpu->neg.tlb.c.lock); 139071aec354SEmilio G. Cota copy_tlb_helper_locked(&tmptlb, tlb); 139171aec354SEmilio G. Cota copy_tlb_helper_locked(tlb, vtlb); 139271aec354SEmilio G. Cota copy_tlb_helper_locked(vtlb, &tmptlb); 139310b32e2cSAnton Johansson qemu_spin_unlock(&cpu->neg.tlb.c.lock); 1394d9bb58e5SYang Zhong 139510b32e2cSAnton Johansson CPUTLBEntryFull *f1 = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; 139610b32e2cSAnton Johansson CPUTLBEntryFull *f2 = &cpu->neg.tlb.d[mmu_idx].vfulltlb[vidx]; 139725d3ec58SRichard Henderson CPUTLBEntryFull tmpf; 139825d3ec58SRichard Henderson tmpf = *f1; *f1 = *f2; *f2 = tmpf; 1399d9bb58e5SYang Zhong return true; 1400d9bb58e5SYang Zhong } 1401d9bb58e5SYang Zhong } 1402d9bb58e5SYang Zhong return false; 1403d9bb58e5SYang Zhong } 1404d9bb58e5SYang Zhong 1405707526adSRichard Henderson static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, 140625d3ec58SRichard Henderson CPUTLBEntryFull *full, uintptr_t retaddr) 1407707526adSRichard Henderson { 140825d3ec58SRichard Henderson ram_addr_t ram_addr = mem_vaddr + full->xlat_section; 1409707526adSRichard Henderson 1410707526adSRichard Henderson trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); 1411707526adSRichard Henderson 1412707526adSRichard Henderson if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { 1413f349e92eSPhilippe Mathieu-Daudé tb_invalidate_phys_range_fast(ram_addr, size, retaddr); 1414707526adSRichard Henderson } 1415707526adSRichard Henderson 1416707526adSRichard Henderson /* 1417707526adSRichard Henderson * Set both VGA and migration bits for simplicity and to remove 1418707526adSRichard Henderson * the notdirty callback faster. 1419707526adSRichard Henderson */ 1420707526adSRichard Henderson cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); 1421707526adSRichard Henderson 1422707526adSRichard Henderson /* We remove the notdirty callback only if the code has been flushed. 
*/ 1423707526adSRichard Henderson if (!cpu_physical_memory_is_clean(ram_addr)) { 1424707526adSRichard Henderson trace_memory_notdirty_set_dirty(mem_vaddr); 1425707526adSRichard Henderson tlb_set_dirty(cpu, mem_vaddr); 1426707526adSRichard Henderson } 1427707526adSRichard Henderson } 1428707526adSRichard Henderson 14295afec1c6SAnton Johansson static int probe_access_internal(CPUState *cpu, vaddr addr, 1430069cfe77SRichard Henderson int fault_size, MMUAccessType access_type, 1431069cfe77SRichard Henderson int mmu_idx, bool nonfault, 1432af803a4fSRichard Henderson void **phost, CPUTLBEntryFull **pfull, 14336d03226bSAlex Bennée uintptr_t retaddr, bool check_mem_cbs) 1434d9bb58e5SYang Zhong { 14355afec1c6SAnton Johansson uintptr_t index = tlb_index(cpu, mmu_idx, addr); 14365afec1c6SAnton Johansson CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr); 14379e39de98SAnton Johansson uint64_t tlb_addr = tlb_read_idx(entry, access_type); 14384f8f4127SAnton Johansson vaddr page_addr = addr & TARGET_PAGE_MASK; 143958e8f1f6SRichard Henderson int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW; 14405afec1c6SAnton Johansson bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(cpu); 144158e8f1f6SRichard Henderson CPUTLBEntryFull *full; 1442ca86cf32SDavid Hildenbrand 1443069cfe77SRichard Henderson if (!tlb_hit_page(tlb_addr, page_addr)) { 14445afec1c6SAnton Johansson if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) { 14455afec1c6SAnton Johansson if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type, 1446069cfe77SRichard Henderson mmu_idx, nonfault, retaddr)) { 1447069cfe77SRichard Henderson /* Non-faulting page table read failed. */ 1448069cfe77SRichard Henderson *phost = NULL; 1449af803a4fSRichard Henderson *pfull = NULL; 1450069cfe77SRichard Henderson return TLB_INVALID_MASK; 1451069cfe77SRichard Henderson } 1452069cfe77SRichard Henderson 145303a98189SDavid Hildenbrand /* TLB resize via tlb_fill may have moved the entry. */ 14545afec1c6SAnton Johansson index = tlb_index(cpu, mmu_idx, addr); 14555afec1c6SAnton Johansson entry = tlb_entry(cpu, mmu_idx, addr); 1456c3c8bf57SRichard Henderson 1457c3c8bf57SRichard Henderson /* 1458c3c8bf57SRichard Henderson * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately, 1459c3c8bf57SRichard Henderson * to force the next access through tlb_fill. We've just 1460c3c8bf57SRichard Henderson * called tlb_fill, so we know that this entry *is* valid. 1461c3c8bf57SRichard Henderson */ 1462c3c8bf57SRichard Henderson flags &= ~TLB_INVALID_MASK; 1463d9bb58e5SYang Zhong } 14640b3c75adSRichard Henderson tlb_addr = tlb_read_idx(entry, access_type); 146503a98189SDavid Hildenbrand } 1466c3c8bf57SRichard Henderson flags &= tlb_addr; 146703a98189SDavid Hildenbrand 14685afec1c6SAnton Johansson *pfull = full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; 146958e8f1f6SRichard Henderson flags |= full->slow_flags[access_type]; 1470af803a4fSRichard Henderson 1471069cfe77SRichard Henderson /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ 14726d03226bSAlex Bennée if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY)) 14736d03226bSAlex Bennée || 14746d03226bSAlex Bennée (access_type != MMU_INST_FETCH && force_mmio)) { 1475069cfe77SRichard Henderson *phost = NULL; 1476069cfe77SRichard Henderson return TLB_MMIO; 1477fef39ccdSDavid Hildenbrand } 1478fef39ccdSDavid Hildenbrand 1479069cfe77SRichard Henderson /* Everything else is RAM. 
*/ 1480069cfe77SRichard Henderson *phost = (void *)((uintptr_t)addr + entry->addend); 1481069cfe77SRichard Henderson return flags; 1482069cfe77SRichard Henderson } 1483069cfe77SRichard Henderson 14844f8f4127SAnton Johansson int probe_access_full(CPUArchState *env, vaddr addr, int size, 1485069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, 1486af803a4fSRichard Henderson bool nonfault, void **phost, CPUTLBEntryFull **pfull, 1487af803a4fSRichard Henderson uintptr_t retaddr) 1488069cfe77SRichard Henderson { 14895afec1c6SAnton Johansson int flags = probe_access_internal(env_cpu(env), addr, size, access_type, 14905afec1c6SAnton Johansson mmu_idx, nonfault, phost, pfull, retaddr, 14915afec1c6SAnton Johansson true); 1492069cfe77SRichard Henderson 1493069cfe77SRichard Henderson /* Handle clean RAM pages. */ 1494069cfe77SRichard Henderson if (unlikely(flags & TLB_NOTDIRTY)) { 1495af803a4fSRichard Henderson notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr); 1496069cfe77SRichard Henderson flags &= ~TLB_NOTDIRTY; 1497069cfe77SRichard Henderson } 1498069cfe77SRichard Henderson 1499069cfe77SRichard Henderson return flags; 1500069cfe77SRichard Henderson } 1501069cfe77SRichard Henderson 15026d03226bSAlex Bennée int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size, 15036d03226bSAlex Bennée MMUAccessType access_type, int mmu_idx, 15046d03226bSAlex Bennée void **phost, CPUTLBEntryFull **pfull) 15056d03226bSAlex Bennée { 15066d03226bSAlex Bennée void *discard_phost; 15076d03226bSAlex Bennée CPUTLBEntryFull *discard_tlb; 15086d03226bSAlex Bennée 15096d03226bSAlex Bennée /* privately handle users that don't need full results */ 15106d03226bSAlex Bennée phost = phost ? phost : &discard_phost; 15116d03226bSAlex Bennée pfull = pfull ? pfull : &discard_tlb; 15126d03226bSAlex Bennée 15135afec1c6SAnton Johansson int flags = probe_access_internal(env_cpu(env), addr, size, access_type, 15145afec1c6SAnton Johansson mmu_idx, true, phost, pfull, 0, false); 15156d03226bSAlex Bennée 15166d03226bSAlex Bennée /* Handle clean RAM pages. */ 15176d03226bSAlex Bennée if (unlikely(flags & TLB_NOTDIRTY)) { 15186d03226bSAlex Bennée notdirty_write(env_cpu(env), addr, 1, *pfull, 0); 15196d03226bSAlex Bennée flags &= ~TLB_NOTDIRTY; 15206d03226bSAlex Bennée } 15216d03226bSAlex Bennée 15226d03226bSAlex Bennée return flags; 15236d03226bSAlex Bennée } 15246d03226bSAlex Bennée 15254f8f4127SAnton Johansson int probe_access_flags(CPUArchState *env, vaddr addr, int size, 1526af803a4fSRichard Henderson MMUAccessType access_type, int mmu_idx, 1527af803a4fSRichard Henderson bool nonfault, void **phost, uintptr_t retaddr) 1528af803a4fSRichard Henderson { 1529af803a4fSRichard Henderson CPUTLBEntryFull *full; 15301770b2f2SDaniel Henrique Barboza int flags; 1531af803a4fSRichard Henderson 15321770b2f2SDaniel Henrique Barboza g_assert(-(addr | TARGET_PAGE_MASK) >= size); 15331770b2f2SDaniel Henrique Barboza 15345afec1c6SAnton Johansson flags = probe_access_internal(env_cpu(env), addr, size, access_type, 15355afec1c6SAnton Johansson mmu_idx, nonfault, phost, &full, retaddr, 15365afec1c6SAnton Johansson true); 15371770b2f2SDaniel Henrique Barboza 15381770b2f2SDaniel Henrique Barboza /* Handle clean RAM pages. 
*/ 15391770b2f2SDaniel Henrique Barboza if (unlikely(flags & TLB_NOTDIRTY)) { 15401770b2f2SDaniel Henrique Barboza notdirty_write(env_cpu(env), addr, 1, full, retaddr); 15411770b2f2SDaniel Henrique Barboza flags &= ~TLB_NOTDIRTY; 15421770b2f2SDaniel Henrique Barboza } 15431770b2f2SDaniel Henrique Barboza 15441770b2f2SDaniel Henrique Barboza return flags; 1545af803a4fSRichard Henderson } 1546af803a4fSRichard Henderson 15474f8f4127SAnton Johansson void *probe_access(CPUArchState *env, vaddr addr, int size, 1548069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1549069cfe77SRichard Henderson { 1550af803a4fSRichard Henderson CPUTLBEntryFull *full; 1551069cfe77SRichard Henderson void *host; 1552069cfe77SRichard Henderson int flags; 1553069cfe77SRichard Henderson 1554069cfe77SRichard Henderson g_assert(-(addr | TARGET_PAGE_MASK) >= size); 1555069cfe77SRichard Henderson 15565afec1c6SAnton Johansson flags = probe_access_internal(env_cpu(env), addr, size, access_type, 15575afec1c6SAnton Johansson mmu_idx, false, &host, &full, retaddr, 15585afec1c6SAnton Johansson true); 1559069cfe77SRichard Henderson 1560069cfe77SRichard Henderson /* Per the interface, size == 0 merely faults the access. */ 1561069cfe77SRichard Henderson if (size == 0) { 156273bc0bd4SRichard Henderson return NULL; 156373bc0bd4SRichard Henderson } 156473bc0bd4SRichard Henderson 1565069cfe77SRichard Henderson if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { 156603a98189SDavid Hildenbrand /* Handle watchpoints. */ 1567069cfe77SRichard Henderson if (flags & TLB_WATCHPOINT) { 1568069cfe77SRichard Henderson int wp_access = (access_type == MMU_DATA_STORE 1569069cfe77SRichard Henderson ? BP_MEM_WRITE : BP_MEM_READ); 157003a98189SDavid Hildenbrand cpu_check_watchpoint(env_cpu(env), addr, size, 157125d3ec58SRichard Henderson full->attrs, wp_access, retaddr); 1572d9bb58e5SYang Zhong } 1573fef39ccdSDavid Hildenbrand 157473bc0bd4SRichard Henderson /* Handle clean RAM pages. */ 1575069cfe77SRichard Henderson if (flags & TLB_NOTDIRTY) { 157625d3ec58SRichard Henderson notdirty_write(env_cpu(env), addr, 1, full, retaddr); 157773bc0bd4SRichard Henderson } 1578fef39ccdSDavid Hildenbrand } 1579fef39ccdSDavid Hildenbrand 1580069cfe77SRichard Henderson return host; 1581d9bb58e5SYang Zhong } 1582d9bb58e5SYang Zhong 15834811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, 15844811e909SRichard Henderson MMUAccessType access_type, int mmu_idx) 15854811e909SRichard Henderson { 1586af803a4fSRichard Henderson CPUTLBEntryFull *full; 1587069cfe77SRichard Henderson void *host; 1588069cfe77SRichard Henderson int flags; 15894811e909SRichard Henderson 15905afec1c6SAnton Johansson flags = probe_access_internal(env_cpu(env), addr, 0, access_type, 15916d03226bSAlex Bennée mmu_idx, true, &host, &full, 0, false); 1592069cfe77SRichard Henderson 1593069cfe77SRichard Henderson /* No combination of flags are expected by the caller. */ 1594069cfe77SRichard Henderson return flags ? NULL : host; 15954811e909SRichard Henderson } 15964811e909SRichard Henderson 15977e0d9973SRichard Henderson /* 15987e0d9973SRichard Henderson * Return a ram_addr_t for the virtual address for execution. 15997e0d9973SRichard Henderson * 16007e0d9973SRichard Henderson * Return -1 if we can't translate and execute from an entire page 16017e0d9973SRichard Henderson * of RAM. This will force us to execute by loading and translating 16027e0d9973SRichard Henderson * one insn at a time, without caching. 
16037e0d9973SRichard Henderson * 16047e0d9973SRichard Henderson * NOTE: This function will trigger an exception if the page is 16057e0d9973SRichard Henderson * not executable. 16067e0d9973SRichard Henderson */ 16074f8f4127SAnton Johansson tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr, 16087e0d9973SRichard Henderson void **hostp) 16097e0d9973SRichard Henderson { 1610af803a4fSRichard Henderson CPUTLBEntryFull *full; 16117e0d9973SRichard Henderson void *p; 16127e0d9973SRichard Henderson 16135afec1c6SAnton Johansson (void)probe_access_internal(env_cpu(env), addr, 1, MMU_INST_FETCH, 16146d03226bSAlex Bennée cpu_mmu_index(env, true), false, 16156d03226bSAlex Bennée &p, &full, 0, false); 16167e0d9973SRichard Henderson if (p == NULL) { 16177e0d9973SRichard Henderson return -1; 16187e0d9973SRichard Henderson } 1619ac01ec6fSWeiwei Li 1620ac01ec6fSWeiwei Li if (full->lg_page_size < TARGET_PAGE_BITS) { 1621ac01ec6fSWeiwei Li return -1; 1622ac01ec6fSWeiwei Li } 1623ac01ec6fSWeiwei Li 16247e0d9973SRichard Henderson if (hostp) { 16257e0d9973SRichard Henderson *hostp = p; 16267e0d9973SRichard Henderson } 16277e0d9973SRichard Henderson return qemu_ram_addr_from_host_nofail(p); 16287e0d9973SRichard Henderson } 16297e0d9973SRichard Henderson 1630cdfac37bSRichard Henderson /* Load/store with atomicity primitives. */ 1631cdfac37bSRichard Henderson #include "ldst_atomicity.c.inc" 1632cdfac37bSRichard Henderson 1633235537faSAlex Bennée #ifdef CONFIG_PLUGIN 1634235537faSAlex Bennée /* 1635235537faSAlex Bennée * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. 1636235537faSAlex Bennée * This should be a hot path as we will have just looked this path up 1637235537faSAlex Bennée * in the softmmu lookup code (or helper). We don't handle re-fills or 1638235537faSAlex Bennée * checking the victim table. This is purely informational. 1639235537faSAlex Bennée * 1640da6aef48SRichard Henderson * The one corner case is i/o write, which can cause changes to the 1641da6aef48SRichard Henderson * address space. Those changes, and the corresponding tlb flush, 1642da6aef48SRichard Henderson * should be delayed until the next TB, so even then this ought not fail. 1643da6aef48SRichard Henderson * But check, Just in Case. 1644235537faSAlex Bennée */ 1645732d5487SAnton Johansson bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx, 1646235537faSAlex Bennée bool is_store, struct qemu_plugin_hwaddr *data) 1647235537faSAlex Bennée { 164810b32e2cSAnton Johansson CPUTLBEntry *tlbe = tlb_entry(cpu, mmu_idx, addr); 164910b32e2cSAnton Johansson uintptr_t index = tlb_index(cpu, mmu_idx, addr); 1650da6aef48SRichard Henderson MMUAccessType access_type = is_store ? 
MMU_DATA_STORE : MMU_DATA_LOAD; 1651da6aef48SRichard Henderson uint64_t tlb_addr = tlb_read_idx(tlbe, access_type); 1652405c02d8SRichard Henderson CPUTLBEntryFull *full; 1653235537faSAlex Bennée 1654da6aef48SRichard Henderson if (unlikely(!tlb_hit(tlb_addr, addr))) { 1655da6aef48SRichard Henderson return false; 1656da6aef48SRichard Henderson } 1657da6aef48SRichard Henderson 165810b32e2cSAnton Johansson full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; 1659405c02d8SRichard Henderson data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK); 1660405c02d8SRichard Henderson 1661235537faSAlex Bennée /* We must have an iotlb entry for MMIO */ 1662235537faSAlex Bennée if (tlb_addr & TLB_MMIO) { 1663405c02d8SRichard Henderson MemoryRegionSection *section = 1664405c02d8SRichard Henderson iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK, 1665405c02d8SRichard Henderson full->attrs); 1666235537faSAlex Bennée data->is_io = true; 1667405c02d8SRichard Henderson data->mr = section->mr; 1668235537faSAlex Bennée } else { 1669235537faSAlex Bennée data->is_io = false; 1670405c02d8SRichard Henderson data->mr = NULL; 1671235537faSAlex Bennée } 1672235537faSAlex Bennée return true; 1673235537faSAlex Bennée } 1674235537faSAlex Bennée #endif 1675235537faSAlex Bennée 167608dff435SRichard Henderson /* 16778cfdacaaSRichard Henderson * Probe for a load/store operation. 16788cfdacaaSRichard Henderson * Return the host address and into @flags. 16798cfdacaaSRichard Henderson */ 16808cfdacaaSRichard Henderson 16818cfdacaaSRichard Henderson typedef struct MMULookupPageData { 16828cfdacaaSRichard Henderson CPUTLBEntryFull *full; 16838cfdacaaSRichard Henderson void *haddr; 1684fb2c53cbSAnton Johansson vaddr addr; 16858cfdacaaSRichard Henderson int flags; 16868cfdacaaSRichard Henderson int size; 16878cfdacaaSRichard Henderson } MMULookupPageData; 16888cfdacaaSRichard Henderson 16898cfdacaaSRichard Henderson typedef struct MMULookupLocals { 16908cfdacaaSRichard Henderson MMULookupPageData page[2]; 16918cfdacaaSRichard Henderson MemOp memop; 16928cfdacaaSRichard Henderson int mmu_idx; 16938cfdacaaSRichard Henderson } MMULookupLocals; 16948cfdacaaSRichard Henderson 16958cfdacaaSRichard Henderson /** 16968cfdacaaSRichard Henderson * mmu_lookup1: translate one page 1697d50ef446SAnton Johansson * @cpu: generic cpu state 16988cfdacaaSRichard Henderson * @data: lookup parameters 16998cfdacaaSRichard Henderson * @mmu_idx: virtual address context 17008cfdacaaSRichard Henderson * @access_type: load/store/code 17018cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 17028cfdacaaSRichard Henderson * 17038cfdacaaSRichard Henderson * Resolve the translation for the one page at @data.addr, filling in 17048cfdacaaSRichard Henderson * the rest of @data with the results. If the translation fails, 17058cfdacaaSRichard Henderson * tlb_fill will longjmp out. Return true if the softmmu tlb for 17068cfdacaaSRichard Henderson * @mmu_idx may have resized. 
17078cfdacaaSRichard Henderson */ 1708d50ef446SAnton Johansson static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, 17098cfdacaaSRichard Henderson int mmu_idx, MMUAccessType access_type, uintptr_t ra) 17108cfdacaaSRichard Henderson { 1711fb2c53cbSAnton Johansson vaddr addr = data->addr; 1712d50ef446SAnton Johansson uintptr_t index = tlb_index(cpu, mmu_idx, addr); 1713d50ef446SAnton Johansson CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr); 17149e39de98SAnton Johansson uint64_t tlb_addr = tlb_read_idx(entry, access_type); 17158cfdacaaSRichard Henderson bool maybe_resized = false; 171658e8f1f6SRichard Henderson CPUTLBEntryFull *full; 171758e8f1f6SRichard Henderson int flags; 17188cfdacaaSRichard Henderson 17198cfdacaaSRichard Henderson /* If the TLB entry is for a different page, reload and try again. */ 17208cfdacaaSRichard Henderson if (!tlb_hit(tlb_addr, addr)) { 1721d50ef446SAnton Johansson if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, 17228cfdacaaSRichard Henderson addr & TARGET_PAGE_MASK)) { 1723d50ef446SAnton Johansson tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra); 17248cfdacaaSRichard Henderson maybe_resized = true; 1725d50ef446SAnton Johansson index = tlb_index(cpu, mmu_idx, addr); 1726d50ef446SAnton Johansson entry = tlb_entry(cpu, mmu_idx, addr); 17278cfdacaaSRichard Henderson } 17288cfdacaaSRichard Henderson tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK; 17298cfdacaaSRichard Henderson } 17308cfdacaaSRichard Henderson 1731d50ef446SAnton Johansson full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; 173258e8f1f6SRichard Henderson flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW); 173358e8f1f6SRichard Henderson flags |= full->slow_flags[access_type]; 173458e8f1f6SRichard Henderson 173558e8f1f6SRichard Henderson data->full = full; 173658e8f1f6SRichard Henderson data->flags = flags; 17378cfdacaaSRichard Henderson /* Compute haddr speculatively; depending on flags it might be invalid. */ 17388cfdacaaSRichard Henderson data->haddr = (void *)((uintptr_t)addr + entry->addend); 17398cfdacaaSRichard Henderson 17408cfdacaaSRichard Henderson return maybe_resized; 17418cfdacaaSRichard Henderson } 17428cfdacaaSRichard Henderson 17438cfdacaaSRichard Henderson /** 17448cfdacaaSRichard Henderson * mmu_watch_or_dirty 1745d50ef446SAnton Johansson * @cpu: generic cpu state 17468cfdacaaSRichard Henderson * @data: lookup parameters 17478cfdacaaSRichard Henderson * @access_type: load/store/code 17488cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 17498cfdacaaSRichard Henderson * 17508cfdacaaSRichard Henderson * Trigger watchpoints for @data.addr:@data.size; 17518cfdacaaSRichard Henderson * record writes to protected clean pages. 17528cfdacaaSRichard Henderson */ 1753d50ef446SAnton Johansson static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data, 17548cfdacaaSRichard Henderson MMUAccessType access_type, uintptr_t ra) 17558cfdacaaSRichard Henderson { 17568cfdacaaSRichard Henderson CPUTLBEntryFull *full = data->full; 1757fb2c53cbSAnton Johansson vaddr addr = data->addr; 17588cfdacaaSRichard Henderson int flags = data->flags; 17598cfdacaaSRichard Henderson int size = data->size; 17608cfdacaaSRichard Henderson 17618cfdacaaSRichard Henderson /* On watchpoint hit, this will longjmp out. */ 17628cfdacaaSRichard Henderson if (flags & TLB_WATCHPOINT) { 17638cfdacaaSRichard Henderson int wp = access_type == MMU_DATA_STORE ? 
BP_MEM_WRITE : BP_MEM_READ; 1764d50ef446SAnton Johansson cpu_check_watchpoint(cpu, addr, size, full->attrs, wp, ra); 17658cfdacaaSRichard Henderson flags &= ~TLB_WATCHPOINT; 17668cfdacaaSRichard Henderson } 17678cfdacaaSRichard Henderson 17688cfdacaaSRichard Henderson /* Note that notdirty is only set for writes. */ 17698cfdacaaSRichard Henderson if (flags & TLB_NOTDIRTY) { 1770d50ef446SAnton Johansson notdirty_write(cpu, addr, size, full, ra); 17718cfdacaaSRichard Henderson flags &= ~TLB_NOTDIRTY; 17728cfdacaaSRichard Henderson } 17738cfdacaaSRichard Henderson data->flags = flags; 17748cfdacaaSRichard Henderson } 17758cfdacaaSRichard Henderson 17768cfdacaaSRichard Henderson /** 17778cfdacaaSRichard Henderson * mmu_lookup: translate page(s) 1778d50ef446SAnton Johansson * @cpu: generic cpu state 17798cfdacaaSRichard Henderson * @addr: virtual address 17808cfdacaaSRichard Henderson * @oi: combined mmu_idx and MemOp 17818cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 17828cfdacaaSRichard Henderson * @access_type: load/store/code 17838cfdacaaSRichard Henderson * @l: output result 17848cfdacaaSRichard Henderson * 17858cfdacaaSRichard Henderson * Resolve the translation for the page(s) beginning at @addr, for MemOp.size 17868cfdacaaSRichard Henderson * bytes. Return true if the lookup crosses a page boundary. 17878cfdacaaSRichard Henderson */ 1788d50ef446SAnton Johansson static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, 17898cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType type, MMULookupLocals *l) 17908cfdacaaSRichard Henderson { 17918cfdacaaSRichard Henderson unsigned a_bits; 17928cfdacaaSRichard Henderson bool crosspage; 17938cfdacaaSRichard Henderson int flags; 17948cfdacaaSRichard Henderson 17958cfdacaaSRichard Henderson l->memop = get_memop(oi); 17968cfdacaaSRichard Henderson l->mmu_idx = get_mmuidx(oi); 17978cfdacaaSRichard Henderson 17988cfdacaaSRichard Henderson tcg_debug_assert(l->mmu_idx < NB_MMU_MODES); 17998cfdacaaSRichard Henderson 18008cfdacaaSRichard Henderson /* Handle CPU specific unaligned behaviour */ 18018cfdacaaSRichard Henderson a_bits = get_alignment_bits(l->memop); 18028cfdacaaSRichard Henderson if (addr & ((1 << a_bits) - 1)) { 1803d50ef446SAnton Johansson cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra); 18048cfdacaaSRichard Henderson } 18058cfdacaaSRichard Henderson 18068cfdacaaSRichard Henderson l->page[0].addr = addr; 18078cfdacaaSRichard Henderson l->page[0].size = memop_size(l->memop); 18088cfdacaaSRichard Henderson l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK; 18098cfdacaaSRichard Henderson l->page[1].size = 0; 18108cfdacaaSRichard Henderson crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK; 18118cfdacaaSRichard Henderson 18128cfdacaaSRichard Henderson if (likely(!crosspage)) { 1813d50ef446SAnton Johansson mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra); 18148cfdacaaSRichard Henderson 18158cfdacaaSRichard Henderson flags = l->page[0].flags; 18168cfdacaaSRichard Henderson if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) { 1817d50ef446SAnton Johansson mmu_watch_or_dirty(cpu, &l->page[0], type, ra); 18188cfdacaaSRichard Henderson } 18198cfdacaaSRichard Henderson if (unlikely(flags & TLB_BSWAP)) { 18208cfdacaaSRichard Henderson l->memop ^= MO_BSWAP; 18218cfdacaaSRichard Henderson } 18228cfdacaaSRichard Henderson } else { 18238cfdacaaSRichard Henderson /* Finish compute of page crossing. 
*/ 18248cfdacaaSRichard Henderson int size0 = l->page[1].addr - addr; 18258cfdacaaSRichard Henderson l->page[1].size = l->page[0].size - size0; 18268cfdacaaSRichard Henderson l->page[0].size = size0; 18278cfdacaaSRichard Henderson 18288cfdacaaSRichard Henderson /* 18298cfdacaaSRichard Henderson * Lookup both pages, recognizing exceptions from either. If the 18308cfdacaaSRichard Henderson * second lookup potentially resized, refresh first CPUTLBEntryFull. 18318cfdacaaSRichard Henderson */ 1832d50ef446SAnton Johansson mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra); 1833d50ef446SAnton Johansson if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) { 1834d50ef446SAnton Johansson uintptr_t index = tlb_index(cpu, l->mmu_idx, addr); 1835d50ef446SAnton Johansson l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index]; 18368cfdacaaSRichard Henderson } 18378cfdacaaSRichard Henderson 18388cfdacaaSRichard Henderson flags = l->page[0].flags | l->page[1].flags; 18398cfdacaaSRichard Henderson if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) { 1840d50ef446SAnton Johansson mmu_watch_or_dirty(cpu, &l->page[0], type, ra); 1841d50ef446SAnton Johansson mmu_watch_or_dirty(cpu, &l->page[1], type, ra); 18428cfdacaaSRichard Henderson } 18438cfdacaaSRichard Henderson 18448cfdacaaSRichard Henderson /* 18458cfdacaaSRichard Henderson * Since target/sparc is the only user of TLB_BSWAP, and all 18468cfdacaaSRichard Henderson * Sparc accesses are aligned, any treatment across two pages 18478cfdacaaSRichard Henderson * would be arbitrary. Refuse it until there's a use. 18488cfdacaaSRichard Henderson */ 18498cfdacaaSRichard Henderson tcg_debug_assert((flags & TLB_BSWAP) == 0); 18508cfdacaaSRichard Henderson } 18518cfdacaaSRichard Henderson 18528cfdacaaSRichard Henderson return crosspage; 18538cfdacaaSRichard Henderson } 18548cfdacaaSRichard Henderson 18558cfdacaaSRichard Henderson /* 185608dff435SRichard Henderson * Probe for an atomic operation. Do not allow unaligned operations, 185708dff435SRichard Henderson * or io operations to proceed. Return the host address. 185808dff435SRichard Henderson */ 1859d560225fSAnton Johansson static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, 1860b0326eb9SAnton Johansson int size, uintptr_t retaddr) 1861d9bb58e5SYang Zhong { 1862b826044fSRichard Henderson uintptr_t mmu_idx = get_mmuidx(oi); 186314776ab5STony Nguyen MemOp mop = get_memop(oi); 1864d9bb58e5SYang Zhong int a_bits = get_alignment_bits(mop); 186508dff435SRichard Henderson uintptr_t index; 186608dff435SRichard Henderson CPUTLBEntry *tlbe; 1867b0326eb9SAnton Johansson vaddr tlb_addr; 186834d49937SPeter Maydell void *hostaddr; 1869417aeaffSRichard Henderson CPUTLBEntryFull *full; 1870d9bb58e5SYang Zhong 1871b826044fSRichard Henderson tcg_debug_assert(mmu_idx < NB_MMU_MODES); 1872b826044fSRichard Henderson 1873d9bb58e5SYang Zhong /* Adjust the given return address. */ 1874d9bb58e5SYang Zhong retaddr -= GETPC_ADJ; 1875d9bb58e5SYang Zhong 1876d9bb58e5SYang Zhong /* Enforce guest required alignment. */ 1877d9bb58e5SYang Zhong if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { 1878d9bb58e5SYang Zhong /* ??? Maybe indicate atomic op to cpu_unaligned_access */ 1879d560225fSAnton Johansson cpu_unaligned_access(cpu, addr, MMU_DATA_STORE, 1880d9bb58e5SYang Zhong mmu_idx, retaddr); 1881d9bb58e5SYang Zhong } 1882d9bb58e5SYang Zhong 1883d9bb58e5SYang Zhong /* Enforce qemu required alignment. 
*/ 188408dff435SRichard Henderson if (unlikely(addr & (size - 1))) { 1885d9bb58e5SYang Zhong /* We get here if guest alignment was not requested, 1886d9bb58e5SYang Zhong or was not enforced by cpu_unaligned_access above. 1887d9bb58e5SYang Zhong We might widen the access and emulate, but for now 1888d9bb58e5SYang Zhong mark an exception and exit the cpu loop. */ 1889d9bb58e5SYang Zhong goto stop_the_world; 1890d9bb58e5SYang Zhong } 1891d9bb58e5SYang Zhong 1892d560225fSAnton Johansson index = tlb_index(cpu, mmu_idx, addr); 1893d560225fSAnton Johansson tlbe = tlb_entry(cpu, mmu_idx, addr); 189408dff435SRichard Henderson 1895d9bb58e5SYang Zhong /* Check TLB entry and enforce page permissions. */ 189608dff435SRichard Henderson tlb_addr = tlb_addr_write(tlbe); 1897334692bcSPeter Maydell if (!tlb_hit(tlb_addr, addr)) { 1898d560225fSAnton Johansson if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE, 18990b3c75adSRichard Henderson addr & TARGET_PAGE_MASK)) { 1900d560225fSAnton Johansson tlb_fill(cpu, addr, size, 190108dff435SRichard Henderson MMU_DATA_STORE, mmu_idx, retaddr); 1902d560225fSAnton Johansson index = tlb_index(cpu, mmu_idx, addr); 1903d560225fSAnton Johansson tlbe = tlb_entry(cpu, mmu_idx, addr); 1904d9bb58e5SYang Zhong } 1905403f290cSEmilio G. Cota tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; 1906d9bb58e5SYang Zhong } 1907d9bb58e5SYang Zhong 1908417aeaffSRichard Henderson /* 1909417aeaffSRichard Henderson * Let the guest notice RMW on a write-only page. 1910417aeaffSRichard Henderson * We have just verified that the page is writable. 1911417aeaffSRichard Henderson * Subpage lookups may have left TLB_INVALID_MASK set, 1912417aeaffSRichard Henderson * but addr_read will only be -1 if PAGE_READ was unset. 1913417aeaffSRichard Henderson */ 1914417aeaffSRichard Henderson if (unlikely(tlbe->addr_read == -1)) { 1915d560225fSAnton Johansson tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr); 191608dff435SRichard Henderson /* 1917417aeaffSRichard Henderson * Since we don't support reads and writes to different 1918417aeaffSRichard Henderson * addresses, and we do have the proper page loaded for 1919417aeaffSRichard Henderson * write, this shouldn't ever return. But just in case, 1920417aeaffSRichard Henderson * handle via stop-the-world. 192108dff435SRichard Henderson */ 192208dff435SRichard Henderson goto stop_the_world; 192308dff435SRichard Henderson } 1924187ba694SRichard Henderson /* Collect tlb flags for read. */ 1925417aeaffSRichard Henderson tlb_addr |= tlbe->addr_read; 192608dff435SRichard Henderson 192755df6fcfSPeter Maydell /* Notice an IO access or a needs-MMU-lookup access */ 19280953674eSRichard Henderson if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) { 1929d9bb58e5SYang Zhong /* There's really nothing that can be done to 1930d9bb58e5SYang Zhong support this apart from stop-the-world. 
*/ 1931d9bb58e5SYang Zhong goto stop_the_world; 1932d9bb58e5SYang Zhong } 1933d9bb58e5SYang Zhong 193434d49937SPeter Maydell hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 1935d560225fSAnton Johansson full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; 193634d49937SPeter Maydell 193734d49937SPeter Maydell if (unlikely(tlb_addr & TLB_NOTDIRTY)) { 1938d560225fSAnton Johansson notdirty_write(cpu, addr, size, full, retaddr); 1939417aeaffSRichard Henderson } 1940417aeaffSRichard Henderson 1941187ba694SRichard Henderson if (unlikely(tlb_addr & TLB_FORCE_SLOW)) { 1942187ba694SRichard Henderson int wp_flags = 0; 1943187ba694SRichard Henderson 1944187ba694SRichard Henderson if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) { 1945187ba694SRichard Henderson wp_flags |= BP_MEM_WRITE; 1946187ba694SRichard Henderson } 1947187ba694SRichard Henderson if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) { 1948187ba694SRichard Henderson wp_flags |= BP_MEM_READ; 1949187ba694SRichard Henderson } 1950187ba694SRichard Henderson if (wp_flags) { 1951d560225fSAnton Johansson cpu_check_watchpoint(cpu, addr, size, 1952187ba694SRichard Henderson full->attrs, wp_flags, retaddr); 1953187ba694SRichard Henderson } 195434d49937SPeter Maydell } 195534d49937SPeter Maydell 195634d49937SPeter Maydell return hostaddr; 1957d9bb58e5SYang Zhong 1958d9bb58e5SYang Zhong stop_the_world: 1959d560225fSAnton Johansson cpu_loop_exit_atomic(cpu, retaddr); 1960d9bb58e5SYang Zhong } 1961d9bb58e5SYang Zhong 1962eed56642SAlex Bennée /* 1963eed56642SAlex Bennée * Load Helpers 1964eed56642SAlex Bennée * 1965eed56642SAlex Bennée * We support two different access types. SOFTMMU_CODE_ACCESS is 1966eed56642SAlex Bennée * specifically for reading instructions from system memory. It is 1967eed56642SAlex Bennée * called by the translation loop and in some helpers where the code 1968eed56642SAlex Bennée * is disassembled. It shouldn't be called directly by guest code. 1969cdfac37bSRichard Henderson * 1970eed56642SAlex Bennée * For the benefit of TCG generated code, we want to avoid the 1971eed56642SAlex Bennée * complication of ABI-specific return type promotion and always 1972eed56642SAlex Bennée * return a value extended to the register size of the host. This is 1973eed56642SAlex Bennée * tcg_target_long, except in the case of a 32-bit host and 64-bit 1974eed56642SAlex Bennée * data, and for that we always have uint64_t. 1975eed56642SAlex Bennée * 1976eed56642SAlex Bennée * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. 1977eed56642SAlex Bennée */ 1978eed56642SAlex Bennée 19798cfdacaaSRichard Henderson /** 19808cfdacaaSRichard Henderson * do_ld_mmio_beN: 1981d50ef446SAnton Johansson * @cpu: generic cpu state 19821966855eSRichard Henderson * @full: page parameters 19838cfdacaaSRichard Henderson * @ret_be: accumulated data 19841966855eSRichard Henderson * @addr: virtual address 19851966855eSRichard Henderson * @size: number of bytes 19868cfdacaaSRichard Henderson * @mmu_idx: virtual address context 19878cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 19881966855eSRichard Henderson * Context: iothread lock held 19898cfdacaaSRichard Henderson * 19901966855eSRichard Henderson * Load @size bytes from @addr, which is memory-mapped i/o. 19918cfdacaaSRichard Henderson * The bytes are concatenated in big-endian order with @ret_be. 
19928cfdacaaSRichard Henderson */ 1993d50ef446SAnton Johansson static uint64_t int_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full, 19941966855eSRichard Henderson uint64_t ret_be, vaddr addr, int size, 19958bf67267SRichard Henderson int mmu_idx, MMUAccessType type, uintptr_t ra, 19968bf67267SRichard Henderson MemoryRegion *mr, hwaddr mr_offset) 19972dd92606SRichard Henderson { 1998190aba80SRichard Henderson do { 199913e61747SRichard Henderson MemOp this_mop; 200013e61747SRichard Henderson unsigned this_size; 200113e61747SRichard Henderson uint64_t val; 200213e61747SRichard Henderson MemTxResult r; 200313e61747SRichard Henderson 2004190aba80SRichard Henderson /* Read aligned pieces up to 8 bytes. */ 200513e61747SRichard Henderson this_mop = ctz32(size | (int)addr | 8); 200613e61747SRichard Henderson this_size = 1 << this_mop; 200713e61747SRichard Henderson this_mop |= MO_BE; 200813e61747SRichard Henderson 20098bf67267SRichard Henderson r = memory_region_dispatch_read(mr, mr_offset, &val, 20108bf67267SRichard Henderson this_mop, full->attrs); 201113e61747SRichard Henderson if (unlikely(r != MEMTX_OK)) { 2012d50ef446SAnton Johansson io_failed(cpu, full, addr, this_size, type, mmu_idx, r, ra); 20138cfdacaaSRichard Henderson } 201413e61747SRichard Henderson if (this_size == 8) { 201513e61747SRichard Henderson return val; 201613e61747SRichard Henderson } 201713e61747SRichard Henderson 201813e61747SRichard Henderson ret_be = (ret_be << (this_size * 8)) | val; 201913e61747SRichard Henderson addr += this_size; 202013e61747SRichard Henderson mr_offset += this_size; 202113e61747SRichard Henderson size -= this_size; 2022190aba80SRichard Henderson } while (size); 202313e61747SRichard Henderson 20248cfdacaaSRichard Henderson return ret_be; 20258cfdacaaSRichard Henderson } 20268cfdacaaSRichard Henderson 2027d50ef446SAnton Johansson static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full, 20288bf67267SRichard Henderson uint64_t ret_be, vaddr addr, int size, 20298bf67267SRichard Henderson int mmu_idx, MMUAccessType type, uintptr_t ra) 20308bf67267SRichard Henderson { 20318bf67267SRichard Henderson MemoryRegionSection *section; 20328bf67267SRichard Henderson MemoryRegion *mr; 20338bf67267SRichard Henderson hwaddr mr_offset; 20348bf67267SRichard Henderson MemTxAttrs attrs; 20358bf67267SRichard Henderson uint64_t ret; 20368bf67267SRichard Henderson 20378bf67267SRichard Henderson tcg_debug_assert(size > 0 && size <= 8); 20388bf67267SRichard Henderson 20398bf67267SRichard Henderson attrs = full->attrs; 2040d50ef446SAnton Johansson section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); 20418bf67267SRichard Henderson mr = section->mr; 20428bf67267SRichard Henderson 20438bf67267SRichard Henderson qemu_mutex_lock_iothread(); 2044d50ef446SAnton Johansson ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx, 20458bf67267SRichard Henderson type, ra, mr, mr_offset); 20468bf67267SRichard Henderson qemu_mutex_unlock_iothread(); 20478bf67267SRichard Henderson 20488bf67267SRichard Henderson return ret; 20498bf67267SRichard Henderson } 20508bf67267SRichard Henderson 2051d50ef446SAnton Johansson static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full, 20528bf67267SRichard Henderson uint64_t ret_be, vaddr addr, int size, 20538bf67267SRichard Henderson int mmu_idx, uintptr_t ra) 20548bf67267SRichard Henderson { 20558bf67267SRichard Henderson MemoryRegionSection *section; 20568bf67267SRichard Henderson MemoryRegion *mr; 20578bf67267SRichard Henderson hwaddr mr_offset; 
20588bf67267SRichard Henderson MemTxAttrs attrs;
20598bf67267SRichard Henderson uint64_t a, b;
20608bf67267SRichard Henderson
20618bf67267SRichard Henderson tcg_debug_assert(size > 8 && size <= 16);
20628bf67267SRichard Henderson
20638bf67267SRichard Henderson attrs = full->attrs;
2064d50ef446SAnton Johansson section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
20658bf67267SRichard Henderson mr = section->mr;
20668bf67267SRichard Henderson
20678bf67267SRichard Henderson qemu_mutex_lock_iothread();
2068d50ef446SAnton Johansson a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
20698bf67267SRichard Henderson MMU_DATA_LOAD, ra, mr, mr_offset);
2070d50ef446SAnton Johansson b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
20718bf67267SRichard Henderson MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
20728bf67267SRichard Henderson qemu_mutex_unlock_iothread();
20738bf67267SRichard Henderson
20748bf67267SRichard Henderson return int128_make128(b, a);
20758bf67267SRichard Henderson }
20768bf67267SRichard Henderson
20778cfdacaaSRichard Henderson /**
20788cfdacaaSRichard Henderson * do_ld_bytes_beN
20798cfdacaaSRichard Henderson * @p: translation parameters
20808cfdacaaSRichard Henderson * @ret_be: accumulated data
20818cfdacaaSRichard Henderson *
20828cfdacaaSRichard Henderson * Load @p->size bytes from @p->haddr, which is RAM.
20838cfdacaaSRichard Henderson * The bytes are concatenated in big-endian order with @ret_be.
20848cfdacaaSRichard Henderson */
20858cfdacaaSRichard Henderson static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be)
20868cfdacaaSRichard Henderson {
20878cfdacaaSRichard Henderson uint8_t *haddr = p->haddr;
20888cfdacaaSRichard Henderson int i, size = p->size;
20898cfdacaaSRichard Henderson
20908cfdacaaSRichard Henderson for (i = 0; i < size; i++) {
20918cfdacaaSRichard Henderson ret_be = (ret_be << 8) | haddr[i];
20928cfdacaaSRichard Henderson }
20938cfdacaaSRichard Henderson return ret_be;
20948cfdacaaSRichard Henderson }
20958cfdacaaSRichard Henderson
2096cdfac37bSRichard Henderson /**
2097cdfac37bSRichard Henderson * do_ld_parts_beN
2098cdfac37bSRichard Henderson * @p: translation parameters
2099cdfac37bSRichard Henderson * @ret_be: accumulated data
2100cdfac37bSRichard Henderson *
2101cdfac37bSRichard Henderson * As do_ld_bytes_beN, but atomically on each aligned part.
2102cdfac37bSRichard Henderson */
2103cdfac37bSRichard Henderson static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be)
2104cdfac37bSRichard Henderson {
2105cdfac37bSRichard Henderson void *haddr = p->haddr;
2106cdfac37bSRichard Henderson int size = p->size;
2107cdfac37bSRichard Henderson
2108cdfac37bSRichard Henderson do {
2109cdfac37bSRichard Henderson uint64_t x;
2110cdfac37bSRichard Henderson int n;
2111cdfac37bSRichard Henderson
2112cdfac37bSRichard Henderson /*
2113cdfac37bSRichard Henderson * Find minimum of alignment and size.
2114cdfac37bSRichard Henderson * This is slightly stronger than required by MO_ATOM_SUBALIGN, which
2115cdfac37bSRichard Henderson * would have only checked the low bits of addr|size once at the start,
2116cdfac37bSRichard Henderson * but is just as easy.
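 * For example, a 6-byte piece whose host address is 2 mod 4 is read as an
 * atomic 2-byte load followed by an atomic 4-byte load, each piece being
 * naturally aligned.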
2117cdfac37bSRichard Henderson */
2118cdfac37bSRichard Henderson switch (((uintptr_t)haddr | size) & 7) {
2119cdfac37bSRichard Henderson case 4:
2120cdfac37bSRichard Henderson x = cpu_to_be32(load_atomic4(haddr));
2121cdfac37bSRichard Henderson ret_be = (ret_be << 32) | x;
2122cdfac37bSRichard Henderson n = 4;
2123cdfac37bSRichard Henderson break;
2124cdfac37bSRichard Henderson case 2:
2125cdfac37bSRichard Henderson case 6:
2126cdfac37bSRichard Henderson x = cpu_to_be16(load_atomic2(haddr));
2127cdfac37bSRichard Henderson ret_be = (ret_be << 16) | x;
2128cdfac37bSRichard Henderson n = 2;
2129cdfac37bSRichard Henderson break;
2130cdfac37bSRichard Henderson default:
2131cdfac37bSRichard Henderson x = *(uint8_t *)haddr;
2132cdfac37bSRichard Henderson ret_be = (ret_be << 8) | x;
2133cdfac37bSRichard Henderson n = 1;
2134cdfac37bSRichard Henderson break;
2135cdfac37bSRichard Henderson case 0:
2136cdfac37bSRichard Henderson g_assert_not_reached();
2137cdfac37bSRichard Henderson }
2138cdfac37bSRichard Henderson haddr += n;
2139cdfac37bSRichard Henderson size -= n;
2140cdfac37bSRichard Henderson } while (size != 0);
2141cdfac37bSRichard Henderson return ret_be;
2142cdfac37bSRichard Henderson }
2143cdfac37bSRichard Henderson
2144cdfac37bSRichard Henderson /**
2145cdfac37bSRichard Henderson * do_ld_whole_be4
2146cdfac37bSRichard Henderson * @p: translation parameters
2147cdfac37bSRichard Henderson * @ret_be: accumulated data
2148cdfac37bSRichard Henderson *
2149cdfac37bSRichard Henderson * As do_ld_bytes_beN, but with one atomic load.
2150cdfac37bSRichard Henderson * Four aligned bytes are guaranteed to cover the load.
2151cdfac37bSRichard Henderson */
2152cdfac37bSRichard Henderson static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be)
2153cdfac37bSRichard Henderson {
2154cdfac37bSRichard Henderson int o = p->addr & 3;
2155cdfac37bSRichard Henderson uint32_t x = load_atomic4(p->haddr - o);
2156cdfac37bSRichard Henderson
2157cdfac37bSRichard Henderson x = cpu_to_be32(x);
2158cdfac37bSRichard Henderson x <<= o * 8;
2159cdfac37bSRichard Henderson x >>= (4 - p->size) * 8;
2160cdfac37bSRichard Henderson return (ret_be << (p->size * 8)) | x;
2161cdfac37bSRichard Henderson }
2162cdfac37bSRichard Henderson
2163cdfac37bSRichard Henderson /**
2164cdfac37bSRichard Henderson * do_ld_whole_be8
2165cdfac37bSRichard Henderson * @p: translation parameters
2166cdfac37bSRichard Henderson * @ret_be: accumulated data
2167cdfac37bSRichard Henderson *
2168cdfac37bSRichard Henderson * As do_ld_bytes_beN, but with one atomic load.
2169cdfac37bSRichard Henderson * Eight aligned bytes are guaranteed to cover the load.
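 *
 * For example, with p->addr & 7 == 5 and p->size == 3, the containing
 * aligned quadword is loaded once; shifting left by 5 * 8 discards the five
 * leading bytes and shifting right by (8 - 3) * 8 right-aligns the three
 * wanted bytes before they are merged into ret_be.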
2170cdfac37bSRichard Henderson */
2171d50ef446SAnton Johansson static uint64_t do_ld_whole_be8(CPUState *cpu, uintptr_t ra,
2172cdfac37bSRichard Henderson MMULookupPageData *p, uint64_t ret_be)
2173cdfac37bSRichard Henderson {
2174cdfac37bSRichard Henderson int o = p->addr & 7;
2175*73fda56fSAnton Johansson uint64_t x = load_atomic8_or_exit(cpu, ra, p->haddr - o);
2176cdfac37bSRichard Henderson
2177cdfac37bSRichard Henderson x = cpu_to_be64(x);
2178cdfac37bSRichard Henderson x <<= o * 8;
2179cdfac37bSRichard Henderson x >>= (8 - p->size) * 8;
2180cdfac37bSRichard Henderson return (ret_be << (p->size * 8)) | x;
2181cdfac37bSRichard Henderson }
2182cdfac37bSRichard Henderson
218335c653c4SRichard Henderson /**
218435c653c4SRichard Henderson * do_ld_whole_be16
218535c653c4SRichard Henderson * @p: translation parameters
218635c653c4SRichard Henderson * @ret_be: accumulated data
218735c653c4SRichard Henderson *
218835c653c4SRichard Henderson * As do_ld_bytes_beN, but with one atomic load.
218935c653c4SRichard Henderson * 16 aligned bytes are guaranteed to cover the load.
219035c653c4SRichard Henderson */
2191d50ef446SAnton Johansson static Int128 do_ld_whole_be16(CPUState *cpu, uintptr_t ra,
219235c653c4SRichard Henderson MMULookupPageData *p, uint64_t ret_be)
219335c653c4SRichard Henderson {
219435c653c4SRichard Henderson int o = p->addr & 15;
2195*73fda56fSAnton Johansson Int128 x, y = load_atomic16_or_exit(cpu, ra, p->haddr - o);
219635c653c4SRichard Henderson int size = p->size;
219735c653c4SRichard Henderson
219835c653c4SRichard Henderson if (!HOST_BIG_ENDIAN) {
219935c653c4SRichard Henderson y = bswap128(y);
220035c653c4SRichard Henderson }
220135c653c4SRichard Henderson y = int128_lshift(y, o * 8);
220235c653c4SRichard Henderson y = int128_urshift(y, (16 - size) * 8);
220335c653c4SRichard Henderson x = int128_make64(ret_be);
220435c653c4SRichard Henderson x = int128_lshift(x, size * 8);
220535c653c4SRichard Henderson return int128_or(x, y);
220635c653c4SRichard Henderson }
220735c653c4SRichard Henderson
22088cfdacaaSRichard Henderson /*
22098cfdacaaSRichard Henderson * Wrapper for the above.
22108cfdacaaSRichard Henderson */
2211d50ef446SAnton Johansson static uint64_t do_ld_beN(CPUState *cpu, MMULookupPageData *p,
2212cdfac37bSRichard Henderson uint64_t ret_be, int mmu_idx, MMUAccessType type,
2213cdfac37bSRichard Henderson MemOp mop, uintptr_t ra)
22148cfdacaaSRichard Henderson {
2215cdfac37bSRichard Henderson MemOp atom;
2216cdfac37bSRichard Henderson unsigned tmp, half_size;
2217cdfac37bSRichard Henderson
22188cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) {
2219d50ef446SAnton Johansson return do_ld_mmio_beN(cpu, p->full, ret_be, p->addr, p->size,
22201966855eSRichard Henderson mmu_idx, type, ra);
2221cdfac37bSRichard Henderson }
2222cdfac37bSRichard Henderson
2223cdfac37bSRichard Henderson /*
2224cdfac37bSRichard Henderson * It is a given that we cross a page and therefore there is no
2225cdfac37bSRichard Henderson * atomicity for the load as a whole, but subobjects may need attention.
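 * For example, a 4-byte MO_ATOM_IFALIGN_PAIR load split 2/2 across the page
 * boundary must still read each 2-byte half atomically (p->size equals
 * half_size below), whereas a 3/1 split leaves both halves misaligned and
 * plain byte loads suffice.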
2226cdfac37bSRichard Henderson */ 2227cdfac37bSRichard Henderson atom = mop & MO_ATOM_MASK; 2228cdfac37bSRichard Henderson switch (atom) { 2229cdfac37bSRichard Henderson case MO_ATOM_SUBALIGN: 2230cdfac37bSRichard Henderson return do_ld_parts_beN(p, ret_be); 2231cdfac37bSRichard Henderson 2232cdfac37bSRichard Henderson case MO_ATOM_IFALIGN_PAIR: 2233cdfac37bSRichard Henderson case MO_ATOM_WITHIN16_PAIR: 2234cdfac37bSRichard Henderson tmp = mop & MO_SIZE; 2235cdfac37bSRichard Henderson tmp = tmp ? tmp - 1 : 0; 2236cdfac37bSRichard Henderson half_size = 1 << tmp; 2237cdfac37bSRichard Henderson if (atom == MO_ATOM_IFALIGN_PAIR 2238cdfac37bSRichard Henderson ? p->size == half_size 2239cdfac37bSRichard Henderson : p->size >= half_size) { 2240cdfac37bSRichard Henderson if (!HAVE_al8_fast && p->size < 4) { 2241cdfac37bSRichard Henderson return do_ld_whole_be4(p, ret_be); 22428cfdacaaSRichard Henderson } else { 2243d50ef446SAnton Johansson return do_ld_whole_be8(cpu, ra, p, ret_be); 2244cdfac37bSRichard Henderson } 2245cdfac37bSRichard Henderson } 2246cdfac37bSRichard Henderson /* fall through */ 2247cdfac37bSRichard Henderson 2248cdfac37bSRichard Henderson case MO_ATOM_IFALIGN: 2249cdfac37bSRichard Henderson case MO_ATOM_WITHIN16: 2250cdfac37bSRichard Henderson case MO_ATOM_NONE: 22518cfdacaaSRichard Henderson return do_ld_bytes_beN(p, ret_be); 2252cdfac37bSRichard Henderson 2253cdfac37bSRichard Henderson default: 2254cdfac37bSRichard Henderson g_assert_not_reached(); 22558cfdacaaSRichard Henderson } 22568cfdacaaSRichard Henderson } 22578cfdacaaSRichard Henderson 225835c653c4SRichard Henderson /* 225935c653c4SRichard Henderson * Wrapper for the above, for 8 < size < 16. 226035c653c4SRichard Henderson */ 2261d50ef446SAnton Johansson static Int128 do_ld16_beN(CPUState *cpu, MMULookupPageData *p, 226235c653c4SRichard Henderson uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra) 226335c653c4SRichard Henderson { 226435c653c4SRichard Henderson int size = p->size; 226535c653c4SRichard Henderson uint64_t b; 226635c653c4SRichard Henderson MemOp atom; 226735c653c4SRichard Henderson 226835c653c4SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2269d50ef446SAnton Johansson return do_ld16_mmio_beN(cpu, p->full, a, p->addr, size, mmu_idx, ra); 227035c653c4SRichard Henderson } 227135c653c4SRichard Henderson 227235c653c4SRichard Henderson /* 227335c653c4SRichard Henderson * It is a given that we cross a page and therefore there is no 227435c653c4SRichard Henderson * atomicity for the load as a whole, but subobjects may need attention. 227535c653c4SRichard Henderson */ 227635c653c4SRichard Henderson atom = mop & MO_ATOM_MASK; 227735c653c4SRichard Henderson switch (atom) { 227835c653c4SRichard Henderson case MO_ATOM_SUBALIGN: 227935c653c4SRichard Henderson p->size = size - 8; 228035c653c4SRichard Henderson a = do_ld_parts_beN(p, a); 228135c653c4SRichard Henderson p->haddr += size - 8; 228235c653c4SRichard Henderson p->size = 8; 228335c653c4SRichard Henderson b = do_ld_parts_beN(p, 0); 228435c653c4SRichard Henderson break; 228535c653c4SRichard Henderson 228635c653c4SRichard Henderson case MO_ATOM_WITHIN16_PAIR: 228735c653c4SRichard Henderson /* Since size > 8, this is the half that must be atomic. 
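 * E.g. for a 16-byte load split 10/6, the 10-byte half ends at the page
 * boundary, so the aligned 16 bytes containing it are covered by the single
 * atomic 16-byte load in do_ld_whole_be16().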
*/ 2288d50ef446SAnton Johansson return do_ld_whole_be16(cpu, ra, p, a); 228935c653c4SRichard Henderson 229035c653c4SRichard Henderson case MO_ATOM_IFALIGN_PAIR: 229135c653c4SRichard Henderson /* 229235c653c4SRichard Henderson * Since size > 8, both halves are misaligned, 229335c653c4SRichard Henderson * and so neither is atomic. 229435c653c4SRichard Henderson */ 229535c653c4SRichard Henderson case MO_ATOM_IFALIGN: 229635c653c4SRichard Henderson case MO_ATOM_WITHIN16: 229735c653c4SRichard Henderson case MO_ATOM_NONE: 229835c653c4SRichard Henderson p->size = size - 8; 229935c653c4SRichard Henderson a = do_ld_bytes_beN(p, a); 230035c653c4SRichard Henderson b = ldq_be_p(p->haddr + size - 8); 230135c653c4SRichard Henderson break; 230235c653c4SRichard Henderson 230335c653c4SRichard Henderson default: 230435c653c4SRichard Henderson g_assert_not_reached(); 230535c653c4SRichard Henderson } 230635c653c4SRichard Henderson 230735c653c4SRichard Henderson return int128_make128(b, a); 230835c653c4SRichard Henderson } 230935c653c4SRichard Henderson 2310d50ef446SAnton Johansson static uint8_t do_ld_1(CPUState *cpu, MMULookupPageData *p, int mmu_idx, 23118cfdacaaSRichard Henderson MMUAccessType type, uintptr_t ra) 23128cfdacaaSRichard Henderson { 23138cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2314d50ef446SAnton Johansson return do_ld_mmio_beN(cpu, p->full, 0, p->addr, 1, mmu_idx, type, ra); 23158cfdacaaSRichard Henderson } else { 23168cfdacaaSRichard Henderson return *(uint8_t *)p->haddr; 23178cfdacaaSRichard Henderson } 23188cfdacaaSRichard Henderson } 23198cfdacaaSRichard Henderson 2320d50ef446SAnton Johansson static uint16_t do_ld_2(CPUState *cpu, MMULookupPageData *p, int mmu_idx, 23218cfdacaaSRichard Henderson MMUAccessType type, MemOp memop, uintptr_t ra) 23228cfdacaaSRichard Henderson { 2323f7eaf9d7SRichard Henderson uint16_t ret; 23248cfdacaaSRichard Henderson 23258cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2326d50ef446SAnton Johansson ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 2, mmu_idx, type, ra); 2327f7eaf9d7SRichard Henderson if ((memop & MO_BSWAP) == MO_LE) { 2328f7eaf9d7SRichard Henderson ret = bswap16(ret); 23298cfdacaaSRichard Henderson } 2330f7eaf9d7SRichard Henderson } else { 23318cfdacaaSRichard Henderson /* Perform the load host endian, then swap if necessary. */ 2332*73fda56fSAnton Johansson ret = load_atom_2(cpu, ra, p->haddr, memop); 23338cfdacaaSRichard Henderson if (memop & MO_BSWAP) { 23348cfdacaaSRichard Henderson ret = bswap16(ret); 23358cfdacaaSRichard Henderson } 2336f7eaf9d7SRichard Henderson } 23378cfdacaaSRichard Henderson return ret; 23388cfdacaaSRichard Henderson } 23398cfdacaaSRichard Henderson 2340d50ef446SAnton Johansson static uint32_t do_ld_4(CPUState *cpu, MMULookupPageData *p, int mmu_idx, 23418cfdacaaSRichard Henderson MMUAccessType type, MemOp memop, uintptr_t ra) 23428cfdacaaSRichard Henderson { 23438cfdacaaSRichard Henderson uint32_t ret; 23448cfdacaaSRichard Henderson 23458cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2346d50ef446SAnton Johansson ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 4, mmu_idx, type, ra); 2347f7eaf9d7SRichard Henderson if ((memop & MO_BSWAP) == MO_LE) { 2348f7eaf9d7SRichard Henderson ret = bswap32(ret); 23498cfdacaaSRichard Henderson } 2350f7eaf9d7SRichard Henderson } else { 23518cfdacaaSRichard Henderson /* Perform the load host endian. 
*/ 2352*73fda56fSAnton Johansson ret = load_atom_4(cpu, ra, p->haddr, memop); 23538cfdacaaSRichard Henderson if (memop & MO_BSWAP) { 23548cfdacaaSRichard Henderson ret = bswap32(ret); 23558cfdacaaSRichard Henderson } 2356f7eaf9d7SRichard Henderson } 23578cfdacaaSRichard Henderson return ret; 23588cfdacaaSRichard Henderson } 23598cfdacaaSRichard Henderson 2360d50ef446SAnton Johansson static uint64_t do_ld_8(CPUState *cpu, MMULookupPageData *p, int mmu_idx, 23618cfdacaaSRichard Henderson MMUAccessType type, MemOp memop, uintptr_t ra) 23628cfdacaaSRichard Henderson { 23638cfdacaaSRichard Henderson uint64_t ret; 23648cfdacaaSRichard Henderson 23658cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2366d50ef446SAnton Johansson ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 8, mmu_idx, type, ra); 2367f7eaf9d7SRichard Henderson if ((memop & MO_BSWAP) == MO_LE) { 2368f7eaf9d7SRichard Henderson ret = bswap64(ret); 23698cfdacaaSRichard Henderson } 2370f7eaf9d7SRichard Henderson } else { 23718cfdacaaSRichard Henderson /* Perform the load host endian. */ 2372*73fda56fSAnton Johansson ret = load_atom_8(cpu, ra, p->haddr, memop); 23738cfdacaaSRichard Henderson if (memop & MO_BSWAP) { 23748cfdacaaSRichard Henderson ret = bswap64(ret); 23758cfdacaaSRichard Henderson } 2376f7eaf9d7SRichard Henderson } 23778cfdacaaSRichard Henderson return ret; 23788cfdacaaSRichard Henderson } 23798cfdacaaSRichard Henderson 2380d50ef446SAnton Johansson static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, 23818cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 23828cfdacaaSRichard Henderson { 23838cfdacaaSRichard Henderson MMULookupLocals l; 23848cfdacaaSRichard Henderson bool crosspage; 23858cfdacaaSRichard Henderson 2386f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 2387d50ef446SAnton Johansson crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); 23888cfdacaaSRichard Henderson tcg_debug_assert(!crosspage); 23898cfdacaaSRichard Henderson 2390d50ef446SAnton Johansson return do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra); 23912dd92606SRichard Henderson } 23922dd92606SRichard Henderson 239324e46e6cSRichard Henderson tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr, 23949002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2395eed56642SAlex Bennée { 23960cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8); 2397d50ef446SAnton Johansson return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD); 23982dd92606SRichard Henderson } 23992dd92606SRichard Henderson 2400d50ef446SAnton Johansson static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, 24018cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 24022dd92606SRichard Henderson { 24038cfdacaaSRichard Henderson MMULookupLocals l; 24048cfdacaaSRichard Henderson bool crosspage; 24058cfdacaaSRichard Henderson uint16_t ret; 24068cfdacaaSRichard Henderson uint8_t a, b; 24078cfdacaaSRichard Henderson 2408f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 2409d50ef446SAnton Johansson crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); 24108cfdacaaSRichard Henderson if (likely(!crosspage)) { 2411d50ef446SAnton Johansson return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra); 24128cfdacaaSRichard Henderson } 24138cfdacaaSRichard Henderson 2414d50ef446SAnton Johansson a = do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra); 2415d50ef446SAnton Johansson b = do_ld_1(cpu, &l.page[1], l.mmu_idx, 
access_type, ra); 24168cfdacaaSRichard Henderson 24178cfdacaaSRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 24188cfdacaaSRichard Henderson ret = a | (b << 8); 24198cfdacaaSRichard Henderson } else { 24208cfdacaaSRichard Henderson ret = b | (a << 8); 24218cfdacaaSRichard Henderson } 24228cfdacaaSRichard Henderson return ret; 2423eed56642SAlex Bennée } 2424eed56642SAlex Bennée 242524e46e6cSRichard Henderson tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr, 24269002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2427eed56642SAlex Bennée { 24280cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); 2429d50ef446SAnton Johansson return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD); 24302dd92606SRichard Henderson } 24312dd92606SRichard Henderson 2432d50ef446SAnton Johansson static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, 24338cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 24342dd92606SRichard Henderson { 24358cfdacaaSRichard Henderson MMULookupLocals l; 24368cfdacaaSRichard Henderson bool crosspage; 24378cfdacaaSRichard Henderson uint32_t ret; 24388cfdacaaSRichard Henderson 2439f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 2440d50ef446SAnton Johansson crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); 24418cfdacaaSRichard Henderson if (likely(!crosspage)) { 2442d50ef446SAnton Johansson return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra); 24438cfdacaaSRichard Henderson } 24448cfdacaaSRichard Henderson 2445d50ef446SAnton Johansson ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra); 2446d50ef446SAnton Johansson ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra); 24478cfdacaaSRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 24488cfdacaaSRichard Henderson ret = bswap32(ret); 24498cfdacaaSRichard Henderson } 24508cfdacaaSRichard Henderson return ret; 2451eed56642SAlex Bennée } 2452eed56642SAlex Bennée 245324e46e6cSRichard Henderson tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr, 24549002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2455eed56642SAlex Bennée { 24560cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); 2457d50ef446SAnton Johansson return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD); 24588cfdacaaSRichard Henderson } 24598cfdacaaSRichard Henderson 2460d50ef446SAnton Johansson static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, 24618cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 24628cfdacaaSRichard Henderson { 24638cfdacaaSRichard Henderson MMULookupLocals l; 24648cfdacaaSRichard Henderson bool crosspage; 24658cfdacaaSRichard Henderson uint64_t ret; 24668cfdacaaSRichard Henderson 2467f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 2468d50ef446SAnton Johansson crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); 24698cfdacaaSRichard Henderson if (likely(!crosspage)) { 2470d50ef446SAnton Johansson return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra); 24718cfdacaaSRichard Henderson } 24728cfdacaaSRichard Henderson 2473d50ef446SAnton Johansson ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra); 2474d50ef446SAnton Johansson ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra); 24758cfdacaaSRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 24768cfdacaaSRichard Henderson ret = bswap64(ret); 
24778cfdacaaSRichard Henderson } 24788cfdacaaSRichard Henderson return ret; 2479eed56642SAlex Bennée } 2480eed56642SAlex Bennée 248124e46e6cSRichard Henderson uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr, 24829002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2483eed56642SAlex Bennée { 24840cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); 2485d50ef446SAnton Johansson return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD); 2486eed56642SAlex Bennée } 2487eed56642SAlex Bennée 2488eed56642SAlex Bennée /* 2489eed56642SAlex Bennée * Provide signed versions of the load routines as well. We can of course 2490eed56642SAlex Bennée * avoid this for 64-bit data, or for 32-bit data on 32-bit host. 2491eed56642SAlex Bennée */ 2492eed56642SAlex Bennée 249324e46e6cSRichard Henderson tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr, 24949002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2495eed56642SAlex Bennée { 24960cadc1edSRichard Henderson return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr); 2497eed56642SAlex Bennée } 2498eed56642SAlex Bennée 249924e46e6cSRichard Henderson tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr, 25009002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2501eed56642SAlex Bennée { 25020cadc1edSRichard Henderson return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr); 2503eed56642SAlex Bennée } 2504eed56642SAlex Bennée 250524e46e6cSRichard Henderson tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr, 25069002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2507eed56642SAlex Bennée { 25080cadc1edSRichard Henderson return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr); 2509eed56642SAlex Bennée } 2510eed56642SAlex Bennée 2511d50ef446SAnton Johansson static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr, 251235c653c4SRichard Henderson MemOpIdx oi, uintptr_t ra) 251335c653c4SRichard Henderson { 251435c653c4SRichard Henderson MMULookupLocals l; 251535c653c4SRichard Henderson bool crosspage; 251635c653c4SRichard Henderson uint64_t a, b; 251735c653c4SRichard Henderson Int128 ret; 251835c653c4SRichard Henderson int first; 251935c653c4SRichard Henderson 2520f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 2521d50ef446SAnton Johansson crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l); 252235c653c4SRichard Henderson if (likely(!crosspage)) { 252335c653c4SRichard Henderson if (unlikely(l.page[0].flags & TLB_MMIO)) { 2524d50ef446SAnton Johansson ret = do_ld16_mmio_beN(cpu, l.page[0].full, 0, addr, 16, 25258bf67267SRichard Henderson l.mmu_idx, ra); 2526f7eaf9d7SRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 2527f7eaf9d7SRichard Henderson ret = bswap128(ret); 252835c653c4SRichard Henderson } 2529f7eaf9d7SRichard Henderson } else { 2530f7eaf9d7SRichard Henderson /* Perform the load host endian. 
*/ 2531*73fda56fSAnton Johansson ret = load_atom_16(cpu, ra, l.page[0].haddr, l.memop); 253235c653c4SRichard Henderson if (l.memop & MO_BSWAP) { 253335c653c4SRichard Henderson ret = bswap128(ret); 253435c653c4SRichard Henderson } 2535f7eaf9d7SRichard Henderson } 253635c653c4SRichard Henderson return ret; 253735c653c4SRichard Henderson } 253835c653c4SRichard Henderson 253935c653c4SRichard Henderson first = l.page[0].size; 254035c653c4SRichard Henderson if (first == 8) { 254135c653c4SRichard Henderson MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64; 254235c653c4SRichard Henderson 2543d50ef446SAnton Johansson a = do_ld_8(cpu, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra); 2544d50ef446SAnton Johansson b = do_ld_8(cpu, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra); 254535c653c4SRichard Henderson if ((mop8 & MO_BSWAP) == MO_LE) { 254635c653c4SRichard Henderson ret = int128_make128(a, b); 254735c653c4SRichard Henderson } else { 254835c653c4SRichard Henderson ret = int128_make128(b, a); 254935c653c4SRichard Henderson } 255035c653c4SRichard Henderson return ret; 255135c653c4SRichard Henderson } 255235c653c4SRichard Henderson 255335c653c4SRichard Henderson if (first < 8) { 2554d50ef446SAnton Johansson a = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, 255535c653c4SRichard Henderson MMU_DATA_LOAD, l.memop, ra); 2556d50ef446SAnton Johansson ret = do_ld16_beN(cpu, &l.page[1], a, l.mmu_idx, l.memop, ra); 255735c653c4SRichard Henderson } else { 2558d50ef446SAnton Johansson ret = do_ld16_beN(cpu, &l.page[0], 0, l.mmu_idx, l.memop, ra); 255935c653c4SRichard Henderson b = int128_getlo(ret); 256035c653c4SRichard Henderson ret = int128_lshift(ret, l.page[1].size * 8); 256135c653c4SRichard Henderson a = int128_gethi(ret); 2562d50ef446SAnton Johansson b = do_ld_beN(cpu, &l.page[1], b, l.mmu_idx, 256335c653c4SRichard Henderson MMU_DATA_LOAD, l.memop, ra); 256435c653c4SRichard Henderson ret = int128_make128(b, a); 256535c653c4SRichard Henderson } 256635c653c4SRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 256735c653c4SRichard Henderson ret = bswap128(ret); 256835c653c4SRichard Henderson } 256935c653c4SRichard Henderson return ret; 257035c653c4SRichard Henderson } 257135c653c4SRichard Henderson 257224e46e6cSRichard Henderson Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr, 257335c653c4SRichard Henderson uint32_t oi, uintptr_t retaddr) 257435c653c4SRichard Henderson { 257535c653c4SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); 2576d50ef446SAnton Johansson return do_ld16_mmu(env_cpu(env), addr, oi, retaddr); 257735c653c4SRichard Henderson } 257835c653c4SRichard Henderson 2579e570597aSRichard Henderson Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi) 258035c653c4SRichard Henderson { 258135c653c4SRichard Henderson return helper_ld16_mmu(env, addr, oi, GETPC()); 258235c653c4SRichard Henderson } 258335c653c4SRichard Henderson 2584eed56642SAlex Bennée /* 2585d03f1408SRichard Henderson * Load helpers for cpu_ldst.h. 
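 *
 * These differ from the helper_*_mmu entry points above essentially only in
 * that they also report the access to any memory plugins.  An illustrative
 * call from a target helper (variable names here are examples only) might be:
 *
 *     MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx);
 *     uint32_t val = cpu_ldl_mmu(env, addr, oi, GETPC());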
2586d03f1408SRichard Henderson */ 2587d03f1408SRichard Henderson 25888cfdacaaSRichard Henderson static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi) 2589d03f1408SRichard Henderson { 259037aff087SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); 2591d03f1408SRichard Henderson } 2592d03f1408SRichard Henderson 2593f83bcecbSRichard Henderson uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) 2594d03f1408SRichard Henderson { 25958cfdacaaSRichard Henderson uint8_t ret; 25968cfdacaaSRichard Henderson 25970cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB); 2598d50ef446SAnton Johansson ret = do_ld1_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD); 25998cfdacaaSRichard Henderson plugin_load_cb(env, addr, oi); 26008cfdacaaSRichard Henderson return ret; 2601d03f1408SRichard Henderson } 2602d03f1408SRichard Henderson 2603fbea7a40SRichard Henderson uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr, 2604f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t ra) 2605d03f1408SRichard Henderson { 26068cfdacaaSRichard Henderson uint16_t ret; 26078cfdacaaSRichard Henderson 2608fbea7a40SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); 2609d50ef446SAnton Johansson ret = do_ld2_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD); 26108cfdacaaSRichard Henderson plugin_load_cb(env, addr, oi); 26118cfdacaaSRichard Henderson return ret; 2612d03f1408SRichard Henderson } 2613d03f1408SRichard Henderson 2614fbea7a40SRichard Henderson uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr, 2615f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t ra) 2616d03f1408SRichard Henderson { 26178cfdacaaSRichard Henderson uint32_t ret; 26188cfdacaaSRichard Henderson 2619fbea7a40SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); 2620d50ef446SAnton Johansson ret = do_ld4_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD); 26218cfdacaaSRichard Henderson plugin_load_cb(env, addr, oi); 26228cfdacaaSRichard Henderson return ret; 2623d03f1408SRichard Henderson } 2624d03f1408SRichard Henderson 2625fbea7a40SRichard Henderson uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr, 2626f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t ra) 2627d03f1408SRichard Henderson { 26288cfdacaaSRichard Henderson uint64_t ret; 26298cfdacaaSRichard Henderson 2630fbea7a40SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); 2631d50ef446SAnton Johansson ret = do_ld8_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD); 26328cfdacaaSRichard Henderson plugin_load_cb(env, addr, oi); 26338cfdacaaSRichard Henderson return ret; 2634d03f1408SRichard Henderson } 2635d03f1408SRichard Henderson 2636fbea7a40SRichard Henderson Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr, 2637cb48f365SRichard Henderson MemOpIdx oi, uintptr_t ra) 2638cb48f365SRichard Henderson { 263935c653c4SRichard Henderson Int128 ret; 2640cb48f365SRichard Henderson 2641fbea7a40SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); 2642d50ef446SAnton Johansson ret = do_ld16_mmu(env_cpu(env), addr, oi, ra); 264335c653c4SRichard Henderson plugin_load_cb(env, addr, oi); 264435c653c4SRichard Henderson return ret; 2645cb48f365SRichard Henderson } 2646cb48f365SRichard Henderson 2647d03f1408SRichard Henderson /* 2648eed56642SAlex Bennée * Store Helpers 2649eed56642SAlex Bennée */ 2650eed56642SAlex Bennée 265159213461SRichard Henderson /** 265259213461SRichard Henderson * do_st_mmio_leN: 2653d50ef446SAnton Johansson * @cpu: generic 
cpu state 26541966855eSRichard Henderson * @full: page parameters 265559213461SRichard Henderson * @val_le: data to store 26561966855eSRichard Henderson * @addr: virtual address 26571966855eSRichard Henderson * @size: number of bytes 265859213461SRichard Henderson * @mmu_idx: virtual address context 265959213461SRichard Henderson * @ra: return address into tcg generated code, or 0 26601966855eSRichard Henderson * Context: iothread lock held 266159213461SRichard Henderson * 26621966855eSRichard Henderson * Store @size bytes at @addr, which is memory-mapped i/o. 266359213461SRichard Henderson * The bytes to store are extracted in little-endian order from @val_le; 266459213461SRichard Henderson * return the bytes of @val_le beyond @p->size that have not been stored. 266559213461SRichard Henderson */ 2666d50ef446SAnton Johansson static uint64_t int_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full, 26671966855eSRichard Henderson uint64_t val_le, vaddr addr, int size, 26681f9823ceSRichard Henderson int mmu_idx, uintptr_t ra, 26691f9823ceSRichard Henderson MemoryRegion *mr, hwaddr mr_offset) 26706b8b622eSRichard Henderson { 2671190aba80SRichard Henderson do { 26725646d6a7SRichard Henderson MemOp this_mop; 26735646d6a7SRichard Henderson unsigned this_size; 26745646d6a7SRichard Henderson MemTxResult r; 26755646d6a7SRichard Henderson 2676190aba80SRichard Henderson /* Store aligned pieces up to 8 bytes. */ 26775646d6a7SRichard Henderson this_mop = ctz32(size | (int)addr | 8); 26785646d6a7SRichard Henderson this_size = 1 << this_mop; 26795646d6a7SRichard Henderson this_mop |= MO_LE; 26805646d6a7SRichard Henderson 26815646d6a7SRichard Henderson r = memory_region_dispatch_write(mr, mr_offset, val_le, 26821f9823ceSRichard Henderson this_mop, full->attrs); 26835646d6a7SRichard Henderson if (unlikely(r != MEMTX_OK)) { 2684d50ef446SAnton Johansson io_failed(cpu, full, addr, this_size, MMU_DATA_STORE, 26855646d6a7SRichard Henderson mmu_idx, r, ra); 268659213461SRichard Henderson } 26875646d6a7SRichard Henderson if (this_size == 8) { 26885646d6a7SRichard Henderson return 0; 26895646d6a7SRichard Henderson } 26905646d6a7SRichard Henderson 26915646d6a7SRichard Henderson val_le >>= this_size * 8; 26925646d6a7SRichard Henderson addr += this_size; 26935646d6a7SRichard Henderson mr_offset += this_size; 26945646d6a7SRichard Henderson size -= this_size; 2695190aba80SRichard Henderson } while (size); 2696190aba80SRichard Henderson 269759213461SRichard Henderson return val_le; 269859213461SRichard Henderson } 269959213461SRichard Henderson 2700d50ef446SAnton Johansson static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full, 27011f9823ceSRichard Henderson uint64_t val_le, vaddr addr, int size, 27021f9823ceSRichard Henderson int mmu_idx, uintptr_t ra) 27031f9823ceSRichard Henderson { 27041f9823ceSRichard Henderson MemoryRegionSection *section; 27051f9823ceSRichard Henderson hwaddr mr_offset; 27061f9823ceSRichard Henderson MemoryRegion *mr; 27071f9823ceSRichard Henderson MemTxAttrs attrs; 27081f9823ceSRichard Henderson uint64_t ret; 27091f9823ceSRichard Henderson 27101f9823ceSRichard Henderson tcg_debug_assert(size > 0 && size <= 8); 27111f9823ceSRichard Henderson 27121f9823ceSRichard Henderson attrs = full->attrs; 2713d50ef446SAnton Johansson section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); 27141f9823ceSRichard Henderson mr = section->mr; 27151f9823ceSRichard Henderson 27161f9823ceSRichard Henderson qemu_mutex_lock_iothread(); 2717d50ef446SAnton Johansson ret = int_st_mmio_leN(cpu, 
full, val_le, addr, size, mmu_idx, 27181f9823ceSRichard Henderson ra, mr, mr_offset); 27191f9823ceSRichard Henderson qemu_mutex_unlock_iothread(); 27201f9823ceSRichard Henderson 27211f9823ceSRichard Henderson return ret; 27221f9823ceSRichard Henderson } 27231f9823ceSRichard Henderson 2724d50ef446SAnton Johansson static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full, 27251f9823ceSRichard Henderson Int128 val_le, vaddr addr, int size, 27261f9823ceSRichard Henderson int mmu_idx, uintptr_t ra) 27271f9823ceSRichard Henderson { 27281f9823ceSRichard Henderson MemoryRegionSection *section; 27291f9823ceSRichard Henderson MemoryRegion *mr; 27301f9823ceSRichard Henderson hwaddr mr_offset; 27311f9823ceSRichard Henderson MemTxAttrs attrs; 27321f9823ceSRichard Henderson uint64_t ret; 27331f9823ceSRichard Henderson 27341f9823ceSRichard Henderson tcg_debug_assert(size > 8 && size <= 16); 27351f9823ceSRichard Henderson 27361f9823ceSRichard Henderson attrs = full->attrs; 2737d50ef446SAnton Johansson section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); 27381f9823ceSRichard Henderson mr = section->mr; 27391f9823ceSRichard Henderson 27401f9823ceSRichard Henderson qemu_mutex_lock_iothread(); 2741d50ef446SAnton Johansson int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8, 27421f9823ceSRichard Henderson mmu_idx, ra, mr, mr_offset); 2743d50ef446SAnton Johansson ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8, 27441f9823ceSRichard Henderson size - 8, mmu_idx, ra, mr, mr_offset + 8); 27451f9823ceSRichard Henderson qemu_mutex_unlock_iothread(); 27461f9823ceSRichard Henderson 27471f9823ceSRichard Henderson return ret; 27481f9823ceSRichard Henderson } 27491f9823ceSRichard Henderson 27506b8b622eSRichard Henderson /* 275159213461SRichard Henderson * Wrapper for the above. 27526b8b622eSRichard Henderson */ 2753d50ef446SAnton Johansson static uint64_t do_st_leN(CPUState *cpu, MMULookupPageData *p, 27545b36f268SRichard Henderson uint64_t val_le, int mmu_idx, 27555b36f268SRichard Henderson MemOp mop, uintptr_t ra) 275659213461SRichard Henderson { 27575b36f268SRichard Henderson MemOp atom; 27585b36f268SRichard Henderson unsigned tmp, half_size; 27595b36f268SRichard Henderson 276059213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2761d50ef446SAnton Johansson return do_st_mmio_leN(cpu, p->full, val_le, p->addr, 27621966855eSRichard Henderson p->size, mmu_idx, ra); 276359213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 276459213461SRichard Henderson return val_le >> (p->size * 8); 27655b36f268SRichard Henderson } 27665b36f268SRichard Henderson 27675b36f268SRichard Henderson /* 27685b36f268SRichard Henderson * It is a given that we cross a page and therefore there is no atomicity 27695b36f268SRichard Henderson * for the store as a whole, but subobjects may need attention. 27705b36f268SRichard Henderson */ 27715b36f268SRichard Henderson atom = mop & MO_ATOM_MASK; 27725b36f268SRichard Henderson switch (atom) { 27735b36f268SRichard Henderson case MO_ATOM_SUBALIGN: 27745b36f268SRichard Henderson return store_parts_leN(p->haddr, p->size, val_le); 27755b36f268SRichard Henderson 27765b36f268SRichard Henderson case MO_ATOM_IFALIGN_PAIR: 27775b36f268SRichard Henderson case MO_ATOM_WITHIN16_PAIR: 27785b36f268SRichard Henderson tmp = mop & MO_SIZE; 27795b36f268SRichard Henderson tmp = tmp ? 
tmp - 1 : 0; 27805b36f268SRichard Henderson half_size = 1 << tmp; 27815b36f268SRichard Henderson if (atom == MO_ATOM_IFALIGN_PAIR 27825b36f268SRichard Henderson ? p->size == half_size 27835b36f268SRichard Henderson : p->size >= half_size) { 27845b36f268SRichard Henderson if (!HAVE_al8_fast && p->size <= 4) { 27855b36f268SRichard Henderson return store_whole_le4(p->haddr, p->size, val_le); 27865b36f268SRichard Henderson } else if (HAVE_al8) { 27875b36f268SRichard Henderson return store_whole_le8(p->haddr, p->size, val_le); 27886b8b622eSRichard Henderson } else { 2789d50ef446SAnton Johansson cpu_loop_exit_atomic(cpu, ra); 27905b36f268SRichard Henderson } 27915b36f268SRichard Henderson } 27925b36f268SRichard Henderson /* fall through */ 27935b36f268SRichard Henderson 27945b36f268SRichard Henderson case MO_ATOM_IFALIGN: 27955b36f268SRichard Henderson case MO_ATOM_WITHIN16: 27965b36f268SRichard Henderson case MO_ATOM_NONE: 27975b36f268SRichard Henderson return store_bytes_leN(p->haddr, p->size, val_le); 27985b36f268SRichard Henderson 27995b36f268SRichard Henderson default: 28005b36f268SRichard Henderson g_assert_not_reached(); 28016b8b622eSRichard Henderson } 28026b8b622eSRichard Henderson } 28036b8b622eSRichard Henderson 280435c653c4SRichard Henderson /* 280535c653c4SRichard Henderson * Wrapper for the above, for 8 < size < 16. 280635c653c4SRichard Henderson */ 2807d50ef446SAnton Johansson static uint64_t do_st16_leN(CPUState *cpu, MMULookupPageData *p, 280835c653c4SRichard Henderson Int128 val_le, int mmu_idx, 280935c653c4SRichard Henderson MemOp mop, uintptr_t ra) 281035c653c4SRichard Henderson { 281135c653c4SRichard Henderson int size = p->size; 281235c653c4SRichard Henderson MemOp atom; 281335c653c4SRichard Henderson 281435c653c4SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2815d50ef446SAnton Johansson return do_st16_mmio_leN(cpu, p->full, val_le, p->addr, 28161f9823ceSRichard Henderson size, mmu_idx, ra); 281735c653c4SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 281835c653c4SRichard Henderson return int128_gethi(val_le) >> ((size - 8) * 8); 281935c653c4SRichard Henderson } 282035c653c4SRichard Henderson 282135c653c4SRichard Henderson /* 282235c653c4SRichard Henderson * It is a given that we cross a page and therefore there is no atomicity 282335c653c4SRichard Henderson * for the store as a whole, but subobjects may need attention. 282435c653c4SRichard Henderson */ 282535c653c4SRichard Henderson atom = mop & MO_ATOM_MASK; 282635c653c4SRichard Henderson switch (atom) { 282735c653c4SRichard Henderson case MO_ATOM_SUBALIGN: 282835c653c4SRichard Henderson store_parts_leN(p->haddr, 8, int128_getlo(val_le)); 282935c653c4SRichard Henderson return store_parts_leN(p->haddr + 8, p->size - 8, 283035c653c4SRichard Henderson int128_gethi(val_le)); 283135c653c4SRichard Henderson 283235c653c4SRichard Henderson case MO_ATOM_WITHIN16_PAIR: 283335c653c4SRichard Henderson /* Since size > 8, this is the half that must be atomic. */ 28348dc24ff4SRichard Henderson if (!HAVE_ATOMIC128_RW) { 2835d50ef446SAnton Johansson cpu_loop_exit_atomic(cpu, ra); 283635c653c4SRichard Henderson } 283735c653c4SRichard Henderson return store_whole_le16(p->haddr, p->size, val_le); 283835c653c4SRichard Henderson 283935c653c4SRichard Henderson case MO_ATOM_IFALIGN_PAIR: 284035c653c4SRichard Henderson /* 284135c653c4SRichard Henderson * Since size > 8, both halves are misaligned, 284235c653c4SRichard Henderson * and so neither is atomic. 
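 * (For example, an 11/5 split across the page boundary puts both 8-byte
 * halves of the value at host addresses that are not 8-aligned.)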
284335c653c4SRichard Henderson */ 284435c653c4SRichard Henderson case MO_ATOM_IFALIGN: 28452be6a486SRichard Henderson case MO_ATOM_WITHIN16: 284635c653c4SRichard Henderson case MO_ATOM_NONE: 284735c653c4SRichard Henderson stq_le_p(p->haddr, int128_getlo(val_le)); 284835c653c4SRichard Henderson return store_bytes_leN(p->haddr + 8, p->size - 8, 284935c653c4SRichard Henderson int128_gethi(val_le)); 285035c653c4SRichard Henderson 285135c653c4SRichard Henderson default: 285235c653c4SRichard Henderson g_assert_not_reached(); 285335c653c4SRichard Henderson } 285435c653c4SRichard Henderson } 285535c653c4SRichard Henderson 2856d50ef446SAnton Johansson static void do_st_1(CPUState *cpu, MMULookupPageData *p, uint8_t val, 285759213461SRichard Henderson int mmu_idx, uintptr_t ra) 2858eed56642SAlex Bennée { 285959213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2860d50ef446SAnton Johansson do_st_mmio_leN(cpu, p->full, val, p->addr, 1, mmu_idx, ra); 286159213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 286259213461SRichard Henderson /* nothing */ 28635b87b3e6SRichard Henderson } else { 286459213461SRichard Henderson *(uint8_t *)p->haddr = val; 28655b87b3e6SRichard Henderson } 2866eed56642SAlex Bennée } 2867eed56642SAlex Bennée 2868d50ef446SAnton Johansson static void do_st_2(CPUState *cpu, MMULookupPageData *p, uint16_t val, 286959213461SRichard Henderson int mmu_idx, MemOp memop, uintptr_t ra) 2870eed56642SAlex Bennée { 287159213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2872f7eaf9d7SRichard Henderson if ((memop & MO_BSWAP) != MO_LE) { 2873f7eaf9d7SRichard Henderson val = bswap16(val); 2874f7eaf9d7SRichard Henderson } 2875d50ef446SAnton Johansson do_st_mmio_leN(cpu, p->full, val, p->addr, 2, mmu_idx, ra); 287659213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 287759213461SRichard Henderson /* nothing */ 287859213461SRichard Henderson } else { 287959213461SRichard Henderson /* Swap to host endian if necessary, then store. */ 288059213461SRichard Henderson if (memop & MO_BSWAP) { 288159213461SRichard Henderson val = bswap16(val); 288259213461SRichard Henderson } 2883*73fda56fSAnton Johansson store_atom_2(cpu, ra, p->haddr, memop, val); 288459213461SRichard Henderson } 288559213461SRichard Henderson } 288659213461SRichard Henderson 2887d50ef446SAnton Johansson static void do_st_4(CPUState *cpu, MMULookupPageData *p, uint32_t val, 288859213461SRichard Henderson int mmu_idx, MemOp memop, uintptr_t ra) 288959213461SRichard Henderson { 289059213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2891f7eaf9d7SRichard Henderson if ((memop & MO_BSWAP) != MO_LE) { 2892f7eaf9d7SRichard Henderson val = bswap32(val); 2893f7eaf9d7SRichard Henderson } 2894d50ef446SAnton Johansson do_st_mmio_leN(cpu, p->full, val, p->addr, 4, mmu_idx, ra); 289559213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 289659213461SRichard Henderson /* nothing */ 289759213461SRichard Henderson } else { 289859213461SRichard Henderson /* Swap to host endian if necessary, then store. 
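 * MO_BSWAP is set when the access's byte order differs from the host's
 * (e.g. a big-endian guest access emulated on a little-endian host), so the
 * value is put into host order before store_atom_4() writes it.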
*/ 289959213461SRichard Henderson if (memop & MO_BSWAP) { 290059213461SRichard Henderson val = bswap32(val); 290159213461SRichard Henderson } 2902*73fda56fSAnton Johansson store_atom_4(cpu, ra, p->haddr, memop, val); 290359213461SRichard Henderson } 290459213461SRichard Henderson } 290559213461SRichard Henderson 2906d50ef446SAnton Johansson static void do_st_8(CPUState *cpu, MMULookupPageData *p, uint64_t val, 290759213461SRichard Henderson int mmu_idx, MemOp memop, uintptr_t ra) 290859213461SRichard Henderson { 290959213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2910f7eaf9d7SRichard Henderson if ((memop & MO_BSWAP) != MO_LE) { 2911f7eaf9d7SRichard Henderson val = bswap64(val); 2912f7eaf9d7SRichard Henderson } 2913d50ef446SAnton Johansson do_st_mmio_leN(cpu, p->full, val, p->addr, 8, mmu_idx, ra); 291459213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 291559213461SRichard Henderson /* nothing */ 291659213461SRichard Henderson } else { 291759213461SRichard Henderson /* Swap to host endian if necessary, then store. */ 291859213461SRichard Henderson if (memop & MO_BSWAP) { 291959213461SRichard Henderson val = bswap64(val); 292059213461SRichard Henderson } 2921*73fda56fSAnton Johansson store_atom_8(cpu, ra, p->haddr, memop, val); 292259213461SRichard Henderson } 2923eed56642SAlex Bennée } 2924eed56642SAlex Bennée 292524e46e6cSRichard Henderson void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val, 292659213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 2927f83bcecbSRichard Henderson { 292859213461SRichard Henderson MMULookupLocals l; 292959213461SRichard Henderson bool crosspage; 293059213461SRichard Henderson 29310cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8); 2932f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 2933d50ef446SAnton Johansson crosspage = mmu_lookup(env_cpu(env), addr, oi, ra, MMU_DATA_STORE, &l); 293459213461SRichard Henderson tcg_debug_assert(!crosspage); 293559213461SRichard Henderson 2936d50ef446SAnton Johansson do_st_1(env_cpu(env), &l.page[0], val, l.mmu_idx, ra); 2937f83bcecbSRichard Henderson } 2938f83bcecbSRichard Henderson 2939d50ef446SAnton Johansson static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val, 294059213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 2941f83bcecbSRichard Henderson { 294259213461SRichard Henderson MMULookupLocals l; 294359213461SRichard Henderson bool crosspage; 294459213461SRichard Henderson uint8_t a, b; 294559213461SRichard Henderson 2946f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 2947d50ef446SAnton Johansson crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); 294859213461SRichard Henderson if (likely(!crosspage)) { 2949d50ef446SAnton Johansson do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); 295059213461SRichard Henderson return; 295159213461SRichard Henderson } 295259213461SRichard Henderson 295359213461SRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 295459213461SRichard Henderson a = val, b = val >> 8; 295559213461SRichard Henderson } else { 295659213461SRichard Henderson b = val, a = val >> 8; 295759213461SRichard Henderson } 2958d50ef446SAnton Johansson do_st_1(cpu, &l.page[0], a, l.mmu_idx, ra); 2959d50ef446SAnton Johansson do_st_1(cpu, &l.page[1], b, l.mmu_idx, ra); 2960f83bcecbSRichard Henderson } 2961f83bcecbSRichard Henderson 296224e46e6cSRichard Henderson void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val, 29639002ffcbSRichard Henderson MemOpIdx 
oi, uintptr_t retaddr) 2964eed56642SAlex Bennée { 29650cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); 2966d50ef446SAnton Johansson do_st2_mmu(env_cpu(env), addr, val, oi, retaddr); 2967f83bcecbSRichard Henderson } 2968f83bcecbSRichard Henderson 2969d50ef446SAnton Johansson static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val, 297059213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 2971f83bcecbSRichard Henderson { 297259213461SRichard Henderson MMULookupLocals l; 297359213461SRichard Henderson bool crosspage; 297459213461SRichard Henderson 2975f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 2976d50ef446SAnton Johansson crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); 297759213461SRichard Henderson if (likely(!crosspage)) { 2978d50ef446SAnton Johansson do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); 297959213461SRichard Henderson return; 298059213461SRichard Henderson } 298159213461SRichard Henderson 298259213461SRichard Henderson /* Swap to little endian for simplicity, then store by bytes. */ 298359213461SRichard Henderson if ((l.memop & MO_BSWAP) != MO_LE) { 298459213461SRichard Henderson val = bswap32(val); 298559213461SRichard Henderson } 2986d50ef446SAnton Johansson val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); 2987d50ef446SAnton Johansson (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra); 2988eed56642SAlex Bennée } 2989eed56642SAlex Bennée 299024e46e6cSRichard Henderson void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val, 29919002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2992eed56642SAlex Bennée { 29930cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); 2994d50ef446SAnton Johansson do_st4_mmu(env_cpu(env), addr, val, oi, retaddr); 299559213461SRichard Henderson } 299659213461SRichard Henderson 2997d50ef446SAnton Johansson static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val, 299859213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 299959213461SRichard Henderson { 300059213461SRichard Henderson MMULookupLocals l; 300159213461SRichard Henderson bool crosspage; 300259213461SRichard Henderson 3003f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 3004d50ef446SAnton Johansson crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); 300559213461SRichard Henderson if (likely(!crosspage)) { 3006d50ef446SAnton Johansson do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); 300759213461SRichard Henderson return; 300859213461SRichard Henderson } 300959213461SRichard Henderson 301059213461SRichard Henderson /* Swap to little endian for simplicity, then store by bytes. 
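 * E.g. an 8-byte store split 3/5 across pages: after the optional bswap64()
 * the value is in little-endian byte order, so do_st_leN() writes its three
 * low bytes to the first page and returns val >> 24, and the second call
 * stores the remaining five bytes on the following page.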
*/ 301159213461SRichard Henderson if ((l.memop & MO_BSWAP) != MO_LE) { 301259213461SRichard Henderson val = bswap64(val); 301359213461SRichard Henderson } 3014d50ef446SAnton Johansson val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); 3015d50ef446SAnton Johansson (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra); 3016eed56642SAlex Bennée } 3017eed56642SAlex Bennée 301824e46e6cSRichard Henderson void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val, 30199002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 3020eed56642SAlex Bennée { 30210cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); 3022d50ef446SAnton Johansson do_st8_mmu(env_cpu(env), addr, val, oi, retaddr); 3023eed56642SAlex Bennée } 3024d9bb58e5SYang Zhong 3025d50ef446SAnton Johansson static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val, 302635c653c4SRichard Henderson MemOpIdx oi, uintptr_t ra) 302735c653c4SRichard Henderson { 302835c653c4SRichard Henderson MMULookupLocals l; 302935c653c4SRichard Henderson bool crosspage; 303035c653c4SRichard Henderson uint64_t a, b; 303135c653c4SRichard Henderson int first; 303235c653c4SRichard Henderson 3033f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 3034d50ef446SAnton Johansson crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); 303535c653c4SRichard Henderson if (likely(!crosspage)) { 3036f7eaf9d7SRichard Henderson if (unlikely(l.page[0].flags & TLB_MMIO)) { 3037f7eaf9d7SRichard Henderson if ((l.memop & MO_BSWAP) != MO_LE) { 3038f7eaf9d7SRichard Henderson val = bswap128(val); 3039f7eaf9d7SRichard Henderson } 3040d50ef446SAnton Johansson do_st16_mmio_leN(cpu, l.page[0].full, val, addr, 16, l.mmu_idx, ra); 3041f7eaf9d7SRichard Henderson } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) { 3042f7eaf9d7SRichard Henderson /* nothing */ 3043f7eaf9d7SRichard Henderson } else { 304435c653c4SRichard Henderson /* Swap to host endian if necessary, then store. 
*/ 304535c653c4SRichard Henderson if (l.memop & MO_BSWAP) { 304635c653c4SRichard Henderson val = bswap128(val); 304735c653c4SRichard Henderson } 3048*73fda56fSAnton Johansson store_atom_16(cpu, ra, l.page[0].haddr, l.memop, val); 304935c653c4SRichard Henderson } 305035c653c4SRichard Henderson return; 305135c653c4SRichard Henderson } 305235c653c4SRichard Henderson 305335c653c4SRichard Henderson first = l.page[0].size; 305435c653c4SRichard Henderson if (first == 8) { 305535c653c4SRichard Henderson MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64; 305635c653c4SRichard Henderson 305735c653c4SRichard Henderson if (l.memop & MO_BSWAP) { 305835c653c4SRichard Henderson val = bswap128(val); 305935c653c4SRichard Henderson } 306035c653c4SRichard Henderson if (HOST_BIG_ENDIAN) { 306135c653c4SRichard Henderson b = int128_getlo(val), a = int128_gethi(val); 306235c653c4SRichard Henderson } else { 306335c653c4SRichard Henderson a = int128_getlo(val), b = int128_gethi(val); 306435c653c4SRichard Henderson } 3065d50ef446SAnton Johansson do_st_8(cpu, &l.page[0], a, l.mmu_idx, mop8, ra); 3066d50ef446SAnton Johansson do_st_8(cpu, &l.page[1], b, l.mmu_idx, mop8, ra); 306735c653c4SRichard Henderson return; 306835c653c4SRichard Henderson } 306935c653c4SRichard Henderson 307035c653c4SRichard Henderson if ((l.memop & MO_BSWAP) != MO_LE) { 307135c653c4SRichard Henderson val = bswap128(val); 307235c653c4SRichard Henderson } 307335c653c4SRichard Henderson if (first < 8) { 3074d50ef446SAnton Johansson do_st_leN(cpu, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra); 307535c653c4SRichard Henderson val = int128_urshift(val, first * 8); 3076d50ef446SAnton Johansson do_st16_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra); 307735c653c4SRichard Henderson } else { 3078d50ef446SAnton Johansson b = do_st16_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); 3079d50ef446SAnton Johansson do_st_leN(cpu, &l.page[1], b, l.mmu_idx, l.memop, ra); 308035c653c4SRichard Henderson } 308135c653c4SRichard Henderson } 308235c653c4SRichard Henderson 308324e46e6cSRichard Henderson void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val, 308435c653c4SRichard Henderson MemOpIdx oi, uintptr_t retaddr) 308535c653c4SRichard Henderson { 308635c653c4SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); 3087d50ef446SAnton Johansson do_st16_mmu(env_cpu(env), addr, val, oi, retaddr); 308835c653c4SRichard Henderson } 308935c653c4SRichard Henderson 3090e570597aSRichard Henderson void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi) 309135c653c4SRichard Henderson { 309235c653c4SRichard Henderson helper_st16_mmu(env, addr, val, oi, GETPC()); 309335c653c4SRichard Henderson } 309435c653c4SRichard Henderson 3095d03f1408SRichard Henderson /* 3096d03f1408SRichard Henderson * Store Helpers for cpu_ldst.h 3097d03f1408SRichard Henderson */ 3098d03f1408SRichard Henderson 309959213461SRichard Henderson static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi) 3100d03f1408SRichard Henderson { 310137aff087SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); 3102d03f1408SRichard Henderson } 3103d03f1408SRichard Henderson 3104022b9bceSAnton Johansson void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val, 3105f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 3106d03f1408SRichard Henderson { 31070cadc1edSRichard Henderson helper_stb_mmu(env, addr, val, oi, retaddr); 310859213461SRichard Henderson plugin_store_cb(env, addr, oi); 
3110d03f1408SRichard Henderson 
3111022b9bceSAnton Johansson void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
3112f83bcecbSRichard Henderson                  MemOpIdx oi, uintptr_t retaddr)
3113d03f1408SRichard Henderson {
3114fbea7a40SRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
3115d50ef446SAnton Johansson     do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
311659213461SRichard Henderson     plugin_store_cb(env, addr, oi);
3117d03f1408SRichard Henderson }
3118d03f1408SRichard Henderson 
3119022b9bceSAnton Johansson void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
3120f83bcecbSRichard Henderson                  MemOpIdx oi, uintptr_t retaddr)
3121d03f1408SRichard Henderson {
3122fbea7a40SRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
3123d50ef446SAnton Johansson     do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
312459213461SRichard Henderson     plugin_store_cb(env, addr, oi);
3125d03f1408SRichard Henderson }
3126d03f1408SRichard Henderson 
3127022b9bceSAnton Johansson void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
3128f83bcecbSRichard Henderson                  MemOpIdx oi, uintptr_t retaddr)
3129d03f1408SRichard Henderson {
3130fbea7a40SRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
3131d50ef446SAnton Johansson     do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
313259213461SRichard Henderson     plugin_store_cb(env, addr, oi);
3133b9e60257SRichard Henderson }
3134b9e60257SRichard Henderson 
3135022b9bceSAnton Johansson void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
3136f83bcecbSRichard Henderson                   MemOpIdx oi, uintptr_t retaddr)
3137b9e60257SRichard Henderson {
3138fbea7a40SRichard Henderson     tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
3139d50ef446SAnton Johansson     do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
314035c653c4SRichard Henderson     plugin_store_cb(env, addr, oi);
3141cb48f365SRichard Henderson }
3142cb48f365SRichard Henderson 
3143f83bcecbSRichard Henderson #include "ldst_common.c.inc"
3144cfe04a4bSRichard Henderson 
3145be9568b4SRichard Henderson /*
3146be9568b4SRichard Henderson  * First set of functions passes in OI and RETADDR.
3147be9568b4SRichard Henderson  * This makes them callable from other helpers.
3148be9568b4SRichard Henderson  */
3149d9bb58e5SYang Zhong 
3150d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \
3151be9568b4SRichard Henderson     glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
3152a754f7f3SRichard Henderson 
3153707526adSRichard Henderson #define ATOMIC_MMU_CLEANUP
3154d9bb58e5SYang Zhong 
3155139c1837SPaolo Bonzini #include "atomic_common.c.inc"
3156d9bb58e5SYang Zhong 
3157d9bb58e5SYang Zhong #define DATA_SIZE 1
3158d9bb58e5SYang Zhong #include "atomic_template.h"
3159d9bb58e5SYang Zhong 
3160d9bb58e5SYang Zhong #define DATA_SIZE 2
3161d9bb58e5SYang Zhong #include "atomic_template.h"
3162d9bb58e5SYang Zhong 
3163d9bb58e5SYang Zhong #define DATA_SIZE 4
3164d9bb58e5SYang Zhong #include "atomic_template.h"
3165d9bb58e5SYang Zhong 
3166d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64
3167d9bb58e5SYang Zhong #define DATA_SIZE 8
3168d9bb58e5SYang Zhong #include "atomic_template.h"
3169d9bb58e5SYang Zhong #endif
3170d9bb58e5SYang Zhong 
317176f9d6adSRichard Henderson #if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
3172d9bb58e5SYang Zhong #define DATA_SIZE 16
3173d9bb58e5SYang Zhong #include "atomic_template.h"
3174d9bb58e5SYang Zhong #endif
3175d9bb58e5SYang Zhong 
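
/*
 * The DATA_SIZE/#include pairs above stamp out one family of atomic
 * helpers per operand size: each inclusion of "atomic_template.h"
 * expands the compare-and-swap, exchange and fetch-and-op helpers named
 * via ATOMIC_NAME for the DATA_SIZE defined just before it (for
 * example, the 4-byte little-endian compare-and-swap becomes
 * cpu_atomic_cmpxchgl_le_mmu), then undefines DATA_SIZE so the next
 * inclusion can redefine it.  The 8- and 16-byte families are only
 * generated when the host provides the corresponding atomic support.
 */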
3176d9bb58e5SYang Zhong /* Code access functions. */
3177d9bb58e5SYang Zhong 
3178fc4120a3SRichard Henderson uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
3179eed56642SAlex Bennée {
31809002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
3181d50ef446SAnton Johansson     return do_ld1_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
31824cef72d0SAlex Bennée }
31834cef72d0SAlex Bennée 
3184fc4120a3SRichard Henderson uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
31852dd92606SRichard Henderson {
31869002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
3187d50ef446SAnton Johansson     return do_ld2_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
31882dd92606SRichard Henderson }
31892dd92606SRichard Henderson 
3190fc4120a3SRichard Henderson uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
31914cef72d0SAlex Bennée {
31929002ffcbSRichard Henderson     MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
3193d50ef446SAnton Johansson     return do_ld4_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
3194eed56642SAlex Bennée }
3195d9bb58e5SYang Zhong 
3196fc4120a3SRichard Henderson uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
3197eed56642SAlex Bennée {
3198fc313c64SFrédéric Pétrot     MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
3199d50ef446SAnton Johansson     return do_ld8_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
3200eed56642SAlex Bennée }
320128990626SRichard Henderson 
320228990626SRichard Henderson uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
320328990626SRichard Henderson                          MemOpIdx oi, uintptr_t retaddr)
320428990626SRichard Henderson {
3205d50ef446SAnton Johansson     return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
320628990626SRichard Henderson }
320728990626SRichard Henderson 
320828990626SRichard Henderson uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
320928990626SRichard Henderson                           MemOpIdx oi, uintptr_t retaddr)
321028990626SRichard Henderson {
3211d50ef446SAnton Johansson     return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
321228990626SRichard Henderson }
321328990626SRichard Henderson 
321428990626SRichard Henderson uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
321528990626SRichard Henderson                           MemOpIdx oi, uintptr_t retaddr)
321628990626SRichard Henderson {
3217d50ef446SAnton Johansson     return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
321828990626SRichard Henderson }
321928990626SRichard Henderson 
322028990626SRichard Henderson uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
322128990626SRichard Henderson                           MemOpIdx oi, uintptr_t retaddr)
322228990626SRichard Henderson {
3223d50ef446SAnton Johansson     return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
322428990626SRichard Henderson }
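
/*
 * Illustrative sketch of the two flavours of code access above:
 * cpu_ld*_code() derives the MemOpIdx from the current instruction-fetch
 * MMU index and passes a retaddr of 0, as is appropriate when called
 * from the translator, while cpu_ld*_code_mmu() lets a runtime helper
 * supply both explicitly.  The helper below is hypothetical and only
 * demonstrates the calling pattern.
 */
#if 0
static uint32_t example_fetch_insn(CPUArchState *env, abi_ptr pc,
                                   uintptr_t retaddr)
{
    if (retaddr == 0) {
        /* Translation time: memop, mmu index and retaddr are implied. */
        return cpu_ldl_code(env, pc);
    } else {
        /* Runtime helper: supply the MemOpIdx and unwind address. */
        MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
        return cpu_ldl_code_mmu(env, pc, oi, retaddr);
    }
}
#endif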