/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace.h"
#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do { \
    if (DEBUG_TLB_GATE) { \
        g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \
    } \
} while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask. */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;
    int i, i0;

    if (unlikely(!jc)) {
        return;
    }

    i0 = tb_jmp_cache_hash_page(page_addr);
    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&jc->array[i0 + i].tb, NULL);
    }
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock_held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->fulltlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->fulltlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->fulltlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
    }
}

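/*
 * Worked example of the sizing heuristic above (illustrative numbers,
 * not from the original source): with old_size == 1024 and
 * window_max_entries == 768, rate == 768 * 100 / 1024 == 75, so the
 * TLB doubles to 2048 entries (capped at 1 << CPU_TLB_DYN_MAX_BITS).
 * Conversely, with window_max_entries == 200 in an expired window,
 * rate == 19 triggers a downsize: pow2ceil(200) == 256 would give an
 * expected rate of 200 * 100 / 256 == 78, which exceeds 70, so the
 * ceiling is doubled and the TLB shrinks to 512 entries instead.
 */
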
static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->fulltlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

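/*
 * Illustrative call pattern (a sketch, mirroring how the *_all_cpus_synced
 * variants below use this helper): remote CPUs get the flush queued as
 * normal async work, while the source CPU queues its own copy as "safe"
 * work, so the flush lands at a point where no vCPU is executing a TB:
 *
 *     flush_all_helper(src_cpu, fn, data);
 *     async_safe_run_on_cpu(src_cpu, fn, data);
 */
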
void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tcg_flush_jmp_cache(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                    env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
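/*
 * Worked example of the bitmask bookkeeping above (illustrative values):
 * with asked == 0b0101 and c.dirty == 0b0111, to_clean == 0b0101, so the
 * loop flushes mmu_idx 0 (ctz32 of 0b0101) and then mmu_idx 2 (ctz32 of
 * 0b0100); "work &= work - 1" clears the lowest set bit each iteration.
 * Only mmu_idx 1 remains marked dirty. Had asked included an index that
 * was already clean, its flush would have been counted as elided.
 */
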
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                        env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

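/*
 * Illustrative use of the idxmap parameter (hypothetical indexes, since
 * the meaning of each mmu_idx is target-specific): a target that keeps
 * kernel translations in mmu_idx 0 and user translations in mmu_idx 2
 * would flush both with:
 *
 *     tlb_flush_by_mmuidx(cpu, (1 << 0) | (1 << 2));
 */
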
static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      target_ulong page, target_ulong mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        target_ulong page,
                                        target_ulong mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
                                            target_ulong page,
                                            target_ulong mask)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}

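/*
 * A note on the mask convention above (a sketch of the semantics): with
 * mask == -1, tlb_hit_page_mask_anyprot() compares under
 * TARGET_PAGE_MASK | TLB_INVALID_MASK, i.e. an exact page match that an
 * entry marked TLB_INVALID_MASK can never satisfy. A narrower mask, as
 * produced by tlb_flush_range_locked() below, lets one comparison match
 * every page in a naturally aligned power-of-two range.
 */
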
static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages. */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed page, which includes the previous.
     */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

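/*
 * Why the previous page is cleared as well (illustrative): a TB can
 * start near the end of one page and extend into the next. With 4 KiB
 * pages, a TB hashed under page N-1 that begins at offset 0xffe still
 * executes code from the flushed page N, so the jump cache slots of
 * both pages must be dropped.
 */
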
/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu. The idxmap parameter is encoded in the page
 * offset of the target_ptr field. This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu. The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper. Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx. In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker. */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}

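/*
 * Worked example of the encoding fast path (illustrative values, 4 KiB
 * pages): flushing addr == 0xffff8000 for mmu_idx 0 and 1 packs
 * 0xffff8000 | 0x3 == 0xffff8003 into data.target_ptr; async_1 recovers
 * addr with & TARGET_PAGE_MASK and idxmap with & ~TARGET_PAGE_MASK.
 * This works only while idxmap fits in the page offset, i.e. while
 * idxmap < TARGET_PAGE_SIZE; otherwise async_2 and a heap allocation
 * are used instead.
 */
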
void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu. */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu. */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

static void tlb_flush_range_locked(CPUArchState *env, int midx,
                                   target_ulong addr, target_ulong len,
                                   unsigned bits)
{
    CPUTLBDesc *d = &env_tlb(env)->d[midx];
    CPUTLBDescFast *f = &env_tlb(env)->f[midx];
    target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
    if (mask < f->mask || len > f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
                  midx, addr, mask, len);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
        target_ulong page = addr + i;
        CPUTLBEntry *entry = tlb_entry(env, midx, page);

        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
    }
}

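/*
 * Illustrative walk through the per-page path above (assumed values,
 * 4 KiB pages): a 64 KiB flush (len == 0x10000) that avoids both
 * early-outs probes 16 page-aligned addresses, evicting each matching
 * fast-TLB entry and any victim-TLB entries under @mask. The early-outs
 * exist because probing page by page costs more than a full flush once
 * @len exceeds what the table covers, or once @bits is too small for a
 * masked compare to identify individual entries.
 */
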
typedef struct {
    target_ulong addr;
    target_ulong len;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushRangeData;

static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
                                              TLBFlushRangeData d)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
              d.addr, d.bits, d.len, d.idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * If the length is larger than the jump cache size, then it will take
     * longer to clear each entry individually than it will to clear it all.
     */
    if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
        tcg_flush_jmp_cache(cpu);
        return;
    }

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed pages, which includes the previous.
     */
    d.addr -= TARGET_PAGE_SIZE;
    for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
        tb_jmp_cache_clear_page(cpu, d.addr);
        d.addr += TARGET_PAGE_SIZE;
    }
}

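/*
 * Order-of-magnitude check on the jump cache cutoff (assuming 4 KiB
 * pages and a 4096-slot jump cache; the actual TB_JMP_CACHE_SIZE is
 * configuration dependent): the threshold would be 4096 * 4096 bytes,
 * i.e. 16 MiB, so any range flush at least that large clears the whole
 * cache in one pass rather than page by page.
 */
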
static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
                                              run_on_cpu_data data)
{
    TLBFlushRangeData *d = data.host_ptr;
    tlb_flush_range_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}

void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                               target_ulong len, uint16_t idxmap,
                               unsigned bits)
{
    TLBFlushRangeData d;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_range_by_mmuidx_async_0(cpu, d);
    } else {
        /* Otherwise allocate a structure, freed by the worker. */
        TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
        async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
                         RUN_ON_CPU_HOST_PTR(p));
    }
}

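/*
 * Ownership note (restating what the code above does): the stack copy
 * of TLBFlushRangeData is used directly on the synchronous path, while
 * each asynchronous recipient gets its own heap duplicate via
 * g_memdup(), which tlb_flush_range_by_mmuidx_async_1() frees after the
 * flush runs. Nothing is shared, so no lifetime or locking questions
 * arise.
 */
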
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
                                        target_ulong addr, target_ulong len,
                                        uint16_t idxmap, unsigned bits)
{
    TLBFlushRangeData d;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu. */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu,
                             tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
}

void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                            target_ulong addr,
                                            uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
                                       idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               target_ulong addr,
                                               target_ulong len,
                                               uint16_t idxmap,
                                               unsigned bits)
{
    TLBFlushRangeData d, *p;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu. */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    p = g_memdup(&d, sizeof(d));
    async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
                          RUN_ON_CPU_HOST_PTR(p));
}

void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   target_ulong addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
                                              idxmap, bits);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
                                             TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with qatomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */

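/*
 * Illustrative flow of the flag mechanism described above (a sketch,
 * not a verbatim trace): a guest store looks up its TLB entry, finds
 * TLB_NOTDIRTY among the low bits of addr_write, and is diverted to the
 * slow path; there the write is performed, the page's dirty state is
 * updated, and the flag can be dropped (see tlb_set_dirty below) so
 * later stores to the same page take the fast path again.
 */
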
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            qatomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;
    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

Cota target_ulong vaddr) 1055d9bb58e5SYang Zhong { 1056d9bb58e5SYang Zhong if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) { 1057d9bb58e5SYang Zhong tlb_entry->addr_write = vaddr; 1058d9bb58e5SYang Zhong } 1059d9bb58e5SYang Zhong } 1060d9bb58e5SYang Zhong 1061d9bb58e5SYang Zhong /* update the TLB corresponding to virtual page vaddr 1062d9bb58e5SYang Zhong so that it is no longer dirty */ 1063d9bb58e5SYang Zhong void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) 1064d9bb58e5SYang Zhong { 1065d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 1066d9bb58e5SYang Zhong int mmu_idx; 1067d9bb58e5SYang Zhong 1068d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 1069d9bb58e5SYang Zhong 1070d9bb58e5SYang Zhong vaddr &= TARGET_PAGE_MASK; 1071a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 1072d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1073383beda9SRichard Henderson tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr); 1074d9bb58e5SYang Zhong } 1075d9bb58e5SYang Zhong 1076d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1077d9bb58e5SYang Zhong int k; 1078d9bb58e5SYang Zhong for (k = 0; k < CPU_VTLB_SIZE; k++) { 1079a40ec84eSRichard Henderson tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr); 1080d9bb58e5SYang Zhong } 1081d9bb58e5SYang Zhong } 1082a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1083d9bb58e5SYang Zhong } 1084d9bb58e5SYang Zhong 1085d9bb58e5SYang Zhong /* Our TLB does not support large pages, so remember the area covered by 1086d9bb58e5SYang Zhong large pages and trigger a full TLB flush if these are invalidated. */ 10871308e026SRichard Henderson static void tlb_add_large_page(CPUArchState *env, int mmu_idx, 10881308e026SRichard Henderson target_ulong vaddr, target_ulong size) 1089d9bb58e5SYang Zhong { 1090a40ec84eSRichard Henderson target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr; 10911308e026SRichard Henderson target_ulong lp_mask = ~(size - 1); 1092d9bb58e5SYang Zhong 10931308e026SRichard Henderson if (lp_addr == (target_ulong)-1) { 10941308e026SRichard Henderson /* No previous large page. */ 10951308e026SRichard Henderson lp_addr = vaddr; 10961308e026SRichard Henderson } else { 1097d9bb58e5SYang Zhong /* Extend the existing region to include the new page. 10981308e026SRichard Henderson This is a compromise between unnecessary flushes and 10991308e026SRichard Henderson the cost of maintaining a full variable size TLB. */ 1100a40ec84eSRichard Henderson lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask; 11011308e026SRichard Henderson while (((lp_addr ^ vaddr) & lp_mask) != 0) { 11021308e026SRichard Henderson lp_mask <<= 1; 1103d9bb58e5SYang Zhong } 11041308e026SRichard Henderson } 1105a40ec84eSRichard Henderson env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask; 1106a40ec84eSRichard Henderson env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask; 1107d9bb58e5SYang Zhong } 1108d9bb58e5SYang Zhong 110940473689SRichard Henderson /* 111040473689SRichard Henderson * Add a new TLB entry. At most one entry for a given virtual address 1111d9bb58e5SYang Zhong * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the 1112d9bb58e5SYang Zhong * supplied size is only used by tlb_flush_page. 1113d9bb58e5SYang Zhong * 1114d9bb58e5SYang Zhong * Called from TCG-generated code, which is under an RCU read-side 1115d9bb58e5SYang Zhong * critical section. 
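 *
 * As an illustration only (a sketch: the page-table walk is elided and
 * the variable names are generic), a target's tlb_fill hook typically
 * builds a CPUTLBEntryFull and hands it over:
 *
 *     CPUTLBEntryFull full = {
 *         .phys_addr = paddr & TARGET_PAGE_MASK,
 *         .attrs = MEMTXATTRS_UNSPECIFIED,
 *         .prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
 *         .lg_page_size = TARGET_PAGE_BITS,
 *     };
 *     tlb_set_page_full(cs, mmu_idx, vaddr, &full);
 *
 * tlb_set_page_with_attrs() below builds exactly this structure from
 * its scalar arguments.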
1116d9bb58e5SYang Zhong */ 111740473689SRichard Henderson void tlb_set_page_full(CPUState *cpu, int mmu_idx, 111840473689SRichard Henderson target_ulong vaddr, CPUTLBEntryFull *full) 1119d9bb58e5SYang Zhong { 1120d9bb58e5SYang Zhong CPUArchState *env = cpu->env_ptr; 1121a40ec84eSRichard Henderson CPUTLB *tlb = env_tlb(env); 1122a40ec84eSRichard Henderson CPUTLBDesc *desc = &tlb->d[mmu_idx]; 1123d9bb58e5SYang Zhong MemoryRegionSection *section; 1124d9bb58e5SYang Zhong unsigned int index; 1125d9bb58e5SYang Zhong target_ulong address; 11268f5db641SRichard Henderson target_ulong write_address; 1127d9bb58e5SYang Zhong uintptr_t addend; 112868fea038SRichard Henderson CPUTLBEntry *te, tn; 112955df6fcfSPeter Maydell hwaddr iotlb, xlat, sz, paddr_page; 113055df6fcfSPeter Maydell target_ulong vaddr_page; 113140473689SRichard Henderson int asidx, wp_flags, prot; 11328f5db641SRichard Henderson bool is_ram, is_romd; 1133d9bb58e5SYang Zhong 1134d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 113555df6fcfSPeter Maydell 113640473689SRichard Henderson if (full->lg_page_size <= TARGET_PAGE_BITS) { 113755df6fcfSPeter Maydell sz = TARGET_PAGE_SIZE; 113855df6fcfSPeter Maydell } else { 113940473689SRichard Henderson sz = (hwaddr)1 << full->lg_page_size; 114040473689SRichard Henderson tlb_add_large_page(env, mmu_idx, vaddr, sz); 114155df6fcfSPeter Maydell } 114255df6fcfSPeter Maydell vaddr_page = vaddr & TARGET_PAGE_MASK; 114340473689SRichard Henderson paddr_page = full->phys_addr & TARGET_PAGE_MASK; 114455df6fcfSPeter Maydell 114540473689SRichard Henderson prot = full->prot; 114640473689SRichard Henderson asidx = cpu_asidx_from_attrs(cpu, full->attrs); 114755df6fcfSPeter Maydell section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, 114840473689SRichard Henderson &xlat, &sz, full->attrs, &prot); 1149d9bb58e5SYang Zhong assert(sz >= TARGET_PAGE_SIZE); 1150d9bb58e5SYang Zhong 1151883f2c59SPhilippe Mathieu-Daudé tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" HWADDR_FMT_plx 1152d9bb58e5SYang Zhong " prot=%x idx=%d\n", 115340473689SRichard Henderson vaddr, full->phys_addr, prot, mmu_idx); 1154d9bb58e5SYang Zhong 115555df6fcfSPeter Maydell address = vaddr_page; 115640473689SRichard Henderson if (full->lg_page_size < TARGET_PAGE_BITS) { 115730d7e098SRichard Henderson /* Repeat the MMU check and TLB fill on every access. */ 115830d7e098SRichard Henderson address |= TLB_INVALID_MASK; 115955df6fcfSPeter Maydell } 116040473689SRichard Henderson if (full->attrs.byte_swap) { 11615b87b3e6SRichard Henderson address |= TLB_BSWAP; 1162a26fc6f5STony Nguyen } 11638f5db641SRichard Henderson 11648f5db641SRichard Henderson is_ram = memory_region_is_ram(section->mr); 11658f5db641SRichard Henderson is_romd = memory_region_is_romd(section->mr); 11668f5db641SRichard Henderson 11678f5db641SRichard Henderson if (is_ram || is_romd) { 11688f5db641SRichard Henderson /* RAM and ROMD both have associated host memory. */ 1169d9bb58e5SYang Zhong addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; 11708f5db641SRichard Henderson } else { 11718f5db641SRichard Henderson /* I/O does not; force the host address to NULL. 
*/ 11728f5db641SRichard Henderson addend = 0; 1173d9bb58e5SYang Zhong } 1174d9bb58e5SYang Zhong 11758f5db641SRichard Henderson write_address = address; 11768f5db641SRichard Henderson if (is_ram) { 11778f5db641SRichard Henderson iotlb = memory_region_get_ram_addr(section->mr) + xlat; 11788f5db641SRichard Henderson /* 11798f5db641SRichard Henderson * Computing is_clean is expensive; avoid all that unless 11808f5db641SRichard Henderson * the page is actually writable. 11818f5db641SRichard Henderson */ 11828f5db641SRichard Henderson if (prot & PAGE_WRITE) { 11838f5db641SRichard Henderson if (section->readonly) { 11848f5db641SRichard Henderson write_address |= TLB_DISCARD_WRITE; 11858f5db641SRichard Henderson } else if (cpu_physical_memory_is_clean(iotlb)) { 11868f5db641SRichard Henderson write_address |= TLB_NOTDIRTY; 11878f5db641SRichard Henderson } 11888f5db641SRichard Henderson } 11898f5db641SRichard Henderson } else { 11908f5db641SRichard Henderson /* I/O or ROMD */ 11918f5db641SRichard Henderson iotlb = memory_region_section_get_iotlb(cpu, section) + xlat; 11928f5db641SRichard Henderson /* 11938f5db641SRichard Henderson * Writes to romd devices must go through MMIO to enable write. 11948f5db641SRichard Henderson * Reads to romd devices go through the ram_ptr found above, 11958f5db641SRichard Henderson * but of course reads to I/O must go through MMIO. 11968f5db641SRichard Henderson */ 11978f5db641SRichard Henderson write_address |= TLB_MMIO; 11988f5db641SRichard Henderson if (!is_romd) { 11998f5db641SRichard Henderson address = write_address; 12008f5db641SRichard Henderson } 12018f5db641SRichard Henderson } 12028f5db641SRichard Henderson 120350b107c5SRichard Henderson wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page, 120450b107c5SRichard Henderson TARGET_PAGE_SIZE); 1205d9bb58e5SYang Zhong 1206383beda9SRichard Henderson index = tlb_index(env, mmu_idx, vaddr_page); 1207383beda9SRichard Henderson te = tlb_entry(env, mmu_idx, vaddr_page); 1208d9bb58e5SYang Zhong 120968fea038SRichard Henderson /* 121071aec354SEmilio G. Cota * Hold the TLB lock for the rest of the function. We could acquire/release 121171aec354SEmilio G. Cota * the lock several times in the function, but it is faster to amortize the 121271aec354SEmilio G. Cota * acquisition cost by acquiring it just once. Note that this leads to 121371aec354SEmilio G. Cota * a longer critical section, but this is not a concern since the TLB lock 121471aec354SEmilio G. Cota * is unlikely to be contended. 121571aec354SEmilio G. Cota */ 1216a40ec84eSRichard Henderson qemu_spin_lock(&tlb->c.lock); 121771aec354SEmilio G. Cota 12183d1523ceSRichard Henderson /* Note that the tlb is no longer clean. */ 1219a40ec84eSRichard Henderson tlb->c.dirty |= 1 << mmu_idx; 12203d1523ceSRichard Henderson 122171aec354SEmilio G. Cota /* Make sure there's no cached translation for the new page. */ 122271aec354SEmilio G. Cota tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page); 122371aec354SEmilio G. Cota 122471aec354SEmilio G. Cota /* 122568fea038SRichard Henderson * Only evict the old entry to the victim tlb if it's for a 122668fea038SRichard Henderson * different page; otherwise just overwrite the stale data. 122768fea038SRichard Henderson */ 12283cea94bbSEmilio G. 
Cota if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) { 1229a40ec84eSRichard Henderson unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; 1230a40ec84eSRichard Henderson CPUTLBEntry *tv = &desc->vtable[vidx]; 123168fea038SRichard Henderson 123268fea038SRichard Henderson /* Evict the old entry into the victim tlb. */ 123371aec354SEmilio G. Cota copy_tlb_helper_locked(tv, te); 123425d3ec58SRichard Henderson desc->vfulltlb[vidx] = desc->fulltlb[index]; 123586e1eff8SEmilio G. Cota tlb_n_used_entries_dec(env, mmu_idx); 123668fea038SRichard Henderson } 1237d9bb58e5SYang Zhong 1238d9bb58e5SYang Zhong /* refill the tlb */ 1239ace41090SPeter Maydell /* 1240ace41090SPeter Maydell * At this point iotlb contains a physical section number in the lower 1241ace41090SPeter Maydell * TARGET_PAGE_BITS, and either 12428f5db641SRichard Henderson * + the ram_addr_t of the page base of the target RAM (RAM) 12438f5db641SRichard Henderson * + the offset within section->mr of the page base (I/O, ROMD) 124455df6fcfSPeter Maydell * We subtract the vaddr_page (which is page aligned and thus won't 1245ace41090SPeter Maydell * disturb the low bits) to give an offset which can be added to the 1246ace41090SPeter Maydell * (non-page-aligned) vaddr of the eventual memory access to get 1247ace41090SPeter Maydell * the MemoryRegion offset for the access. Note that the vaddr we 1248ace41090SPeter Maydell * subtract here is that of the page base, and not the same as the 1249ace41090SPeter Maydell * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). 1250ace41090SPeter Maydell */ 125140473689SRichard Henderson desc->fulltlb[index] = *full; 125225d3ec58SRichard Henderson desc->fulltlb[index].xlat_section = iotlb - vaddr_page; 125340473689SRichard Henderson desc->fulltlb[index].phys_addr = paddr_page; 1254d9bb58e5SYang Zhong 1255d9bb58e5SYang Zhong /* Now calculate the new entry */ 125655df6fcfSPeter Maydell tn.addend = addend - vaddr_page; 1257d9bb58e5SYang Zhong if (prot & PAGE_READ) { 1258d9bb58e5SYang Zhong tn.addr_read = address; 125950b107c5SRichard Henderson if (wp_flags & BP_MEM_READ) { 126050b107c5SRichard Henderson tn.addr_read |= TLB_WATCHPOINT; 126150b107c5SRichard Henderson } 1262d9bb58e5SYang Zhong } else { 1263d9bb58e5SYang Zhong tn.addr_read = -1; 1264d9bb58e5SYang Zhong } 1265d9bb58e5SYang Zhong 1266d9bb58e5SYang Zhong if (prot & PAGE_EXEC) { 12678f5db641SRichard Henderson tn.addr_code = address; 1268d9bb58e5SYang Zhong } else { 1269d9bb58e5SYang Zhong tn.addr_code = -1; 1270d9bb58e5SYang Zhong } 1271d9bb58e5SYang Zhong 1272d9bb58e5SYang Zhong tn.addr_write = -1; 1273d9bb58e5SYang Zhong if (prot & PAGE_WRITE) { 12748f5db641SRichard Henderson tn.addr_write = write_address; 1275f52bfb12SDavid Hildenbrand if (prot & PAGE_WRITE_INV) { 1276f52bfb12SDavid Hildenbrand tn.addr_write |= TLB_INVALID_MASK; 1277f52bfb12SDavid Hildenbrand } 127850b107c5SRichard Henderson if (wp_flags & BP_MEM_WRITE) { 127950b107c5SRichard Henderson tn.addr_write |= TLB_WATCHPOINT; 128050b107c5SRichard Henderson } 1281d9bb58e5SYang Zhong } 1282d9bb58e5SYang Zhong 128371aec354SEmilio G. Cota copy_tlb_helper_locked(te, &tn); 128486e1eff8SEmilio G. 
Cota tlb_n_used_entries_inc(env, mmu_idx); 1285a40ec84eSRichard Henderson qemu_spin_unlock(&tlb->c.lock); 1286d9bb58e5SYang Zhong } 1287d9bb58e5SYang Zhong 128840473689SRichard Henderson void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, 128940473689SRichard Henderson hwaddr paddr, MemTxAttrs attrs, int prot, 129040473689SRichard Henderson int mmu_idx, target_ulong size) 129140473689SRichard Henderson { 129240473689SRichard Henderson CPUTLBEntryFull full = { 129340473689SRichard Henderson .phys_addr = paddr, 129440473689SRichard Henderson .attrs = attrs, 129540473689SRichard Henderson .prot = prot, 129640473689SRichard Henderson .lg_page_size = ctz64(size) 129740473689SRichard Henderson }; 129840473689SRichard Henderson 129940473689SRichard Henderson assert(is_power_of_2(size)); 130040473689SRichard Henderson tlb_set_page_full(cpu, mmu_idx, vaddr, &full); 130140473689SRichard Henderson } 130240473689SRichard Henderson 1303d9bb58e5SYang Zhong void tlb_set_page(CPUState *cpu, target_ulong vaddr, 1304d9bb58e5SYang Zhong hwaddr paddr, int prot, 1305d9bb58e5SYang Zhong int mmu_idx, target_ulong size) 1306d9bb58e5SYang Zhong { 1307d9bb58e5SYang Zhong tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED, 1308d9bb58e5SYang Zhong prot, mmu_idx, size); 1309d9bb58e5SYang Zhong } 1310d9bb58e5SYang Zhong 1311c319dc13SRichard Henderson /* 1312c319dc13SRichard Henderson * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the 1313c319dc13SRichard Henderson * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must 1314c319dc13SRichard Henderson * be discarded and looked up again (e.g. via tlb_entry()). 1315c319dc13SRichard Henderson */ 1316c319dc13SRichard Henderson static void tlb_fill(CPUState *cpu, target_ulong addr, int size, 1317c319dc13SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1318c319dc13SRichard Henderson { 1319c319dc13SRichard Henderson bool ok; 1320c319dc13SRichard Henderson 1321c319dc13SRichard Henderson /* 1322c319dc13SRichard Henderson * This is not a probe, so only valid return is success; failure 1323c319dc13SRichard Henderson * should result in exception + longjmp to the cpu loop. 
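 * The tcg_ops->tlb_fill hook may return false only for a nonfault
 * probe, which is how probe_access_internal() below uses it; here
 * nonfault is false, so success can be asserted.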
1324c319dc13SRichard Henderson */ 13258810ee2aSAlex Bennée ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size, 1326e124536fSEduardo Habkost access_type, mmu_idx, false, retaddr); 1327c319dc13SRichard Henderson assert(ok); 1328c319dc13SRichard Henderson } 1329c319dc13SRichard Henderson 133078271684SClaudio Fontana static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, 133178271684SClaudio Fontana MMUAccessType access_type, 133278271684SClaudio Fontana int mmu_idx, uintptr_t retaddr) 133378271684SClaudio Fontana { 13348810ee2aSAlex Bennée cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, 13358810ee2aSAlex Bennée mmu_idx, retaddr); 133678271684SClaudio Fontana } 133778271684SClaudio Fontana 133878271684SClaudio Fontana static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr, 133978271684SClaudio Fontana vaddr addr, unsigned size, 134078271684SClaudio Fontana MMUAccessType access_type, 134178271684SClaudio Fontana int mmu_idx, MemTxAttrs attrs, 134278271684SClaudio Fontana MemTxResult response, 134378271684SClaudio Fontana uintptr_t retaddr) 134478271684SClaudio Fontana { 134578271684SClaudio Fontana CPUClass *cc = CPU_GET_CLASS(cpu); 134678271684SClaudio Fontana 134778271684SClaudio Fontana if (!cpu->ignore_memory_transaction_failures && 134878271684SClaudio Fontana cc->tcg_ops->do_transaction_failed) { 134978271684SClaudio Fontana cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size, 135078271684SClaudio Fontana access_type, mmu_idx, attrs, 135178271684SClaudio Fontana response, retaddr); 135278271684SClaudio Fontana } 135378271684SClaudio Fontana } 135478271684SClaudio Fontana 135525d3ec58SRichard Henderson static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full, 1356f1be3696SRichard Henderson int mmu_idx, target_ulong addr, uintptr_t retaddr, 1357be5c4787STony Nguyen MMUAccessType access_type, MemOp op) 1358d9bb58e5SYang Zhong { 135929a0af61SRichard Henderson CPUState *cpu = env_cpu(env); 13602d54f194SPeter Maydell hwaddr mr_offset; 13612d54f194SPeter Maydell MemoryRegionSection *section; 13622d54f194SPeter Maydell MemoryRegion *mr; 1363d9bb58e5SYang Zhong uint64_t val; 136404e3aabdSPeter Maydell MemTxResult r; 1365d9bb58e5SYang Zhong 136625d3ec58SRichard Henderson section = iotlb_to_section(cpu, full->xlat_section, full->attrs); 13672d54f194SPeter Maydell mr = section->mr; 136825d3ec58SRichard Henderson mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; 1369d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 137008565552SRichard Henderson if (!cpu->can_do_io) { 1371d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 1372d9bb58e5SYang Zhong } 1373d9bb58e5SYang Zhong 137461b59fb2SRichard Henderson { 137561b59fb2SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 137625d3ec58SRichard Henderson r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs); 137761b59fb2SRichard Henderson } 137861b59fb2SRichard Henderson 137904e3aabdSPeter Maydell if (r != MEMTX_OK) { 13802d54f194SPeter Maydell hwaddr physaddr = mr_offset + 13812d54f194SPeter Maydell section->offset_within_address_space - 13822d54f194SPeter Maydell section->offset_within_region; 13832d54f194SPeter Maydell 1384be5c4787STony Nguyen cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, 138525d3ec58SRichard Henderson mmu_idx, full->attrs, r, retaddr); 138604e3aabdSPeter Maydell } 1387d9bb58e5SYang Zhong return val; 1388d9bb58e5SYang Zhong } 1389d9bb58e5SYang Zhong 13902f3a57eeSAlex Bennée /* 139125d3ec58SRichard Henderson * Save a potentially trashed 
CPUTLBEntryFull for later lookup by plugin. 139225d3ec58SRichard Henderson * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match 1393570ef309SAlex Bennée * because of the side effect of io_writex changing memory layout. 13942f3a57eeSAlex Bennée */ 139537523ff7SRichard Henderson static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section, 139637523ff7SRichard Henderson hwaddr mr_offset) 13972f3a57eeSAlex Bennée { 13982f3a57eeSAlex Bennée #ifdef CONFIG_PLUGIN 13992f3a57eeSAlex Bennée SavedIOTLB *saved = &cs->saved_iotlb; 14002f3a57eeSAlex Bennée saved->section = section; 14012f3a57eeSAlex Bennée saved->mr_offset = mr_offset; 14022f3a57eeSAlex Bennée #endif 14032f3a57eeSAlex Bennée } 14042f3a57eeSAlex Bennée 140525d3ec58SRichard Henderson static void io_writex(CPUArchState *env, CPUTLBEntryFull *full, 1406f1be3696SRichard Henderson int mmu_idx, uint64_t val, target_ulong addr, 1407be5c4787STony Nguyen uintptr_t retaddr, MemOp op) 1408d9bb58e5SYang Zhong { 140929a0af61SRichard Henderson CPUState *cpu = env_cpu(env); 14102d54f194SPeter Maydell hwaddr mr_offset; 14112d54f194SPeter Maydell MemoryRegionSection *section; 14122d54f194SPeter Maydell MemoryRegion *mr; 141304e3aabdSPeter Maydell MemTxResult r; 1414d9bb58e5SYang Zhong 141525d3ec58SRichard Henderson section = iotlb_to_section(cpu, full->xlat_section, full->attrs); 14162d54f194SPeter Maydell mr = section->mr; 141725d3ec58SRichard Henderson mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; 141808565552SRichard Henderson if (!cpu->can_do_io) { 1419d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 1420d9bb58e5SYang Zhong } 1421d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 1422d9bb58e5SYang Zhong 14232f3a57eeSAlex Bennée /* 14242f3a57eeSAlex Bennée * The memory_region_dispatch may trigger a flush/resize 14252f3a57eeSAlex Bennée * so for plugins we save the iotlb_data just in case. 14262f3a57eeSAlex Bennée */ 142737523ff7SRichard Henderson save_iotlb_data(cpu, section, mr_offset); 14282f3a57eeSAlex Bennée 142961b59fb2SRichard Henderson { 143061b59fb2SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 143125d3ec58SRichard Henderson r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs); 143261b59fb2SRichard Henderson } 143361b59fb2SRichard Henderson 143404e3aabdSPeter Maydell if (r != MEMTX_OK) { 14352d54f194SPeter Maydell hwaddr physaddr = mr_offset + 14362d54f194SPeter Maydell section->offset_within_address_space - 14372d54f194SPeter Maydell section->offset_within_region; 14382d54f194SPeter Maydell 1439be5c4787STony Nguyen cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), 144025d3ec58SRichard Henderson MMU_DATA_STORE, mmu_idx, full->attrs, r, 1441be5c4787STony Nguyen retaddr); 144204e3aabdSPeter Maydell } 1443d9bb58e5SYang Zhong } 1444d9bb58e5SYang Zhong 1445d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied 1446d9bb58e5SYang Zhong back to the main tlb. */ 1447d9bb58e5SYang Zhong static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, 14480b3c75adSRichard Henderson MMUAccessType access_type, target_ulong page) 1449d9bb58e5SYang Zhong { 1450d9bb58e5SYang Zhong size_t vidx; 145171aec354SEmilio G. 
Cota 145229a0af61SRichard Henderson assert_cpu_is_self(env_cpu(env)); 1453d9bb58e5SYang Zhong for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { 1454a40ec84eSRichard Henderson CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; 14550b3c75adSRichard Henderson target_ulong cmp = tlb_read_idx(vtlb, access_type); 1456d9bb58e5SYang Zhong 1457d9bb58e5SYang Zhong if (cmp == page) { 1458d9bb58e5SYang Zhong /* Found entry in victim tlb, swap tlb and iotlb. */ 1459a40ec84eSRichard Henderson CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index]; 1460d9bb58e5SYang Zhong 1461a40ec84eSRichard Henderson qemu_spin_lock(&env_tlb(env)->c.lock); 146271aec354SEmilio G. Cota copy_tlb_helper_locked(&tmptlb, tlb); 146371aec354SEmilio G. Cota copy_tlb_helper_locked(tlb, vtlb); 146471aec354SEmilio G. Cota copy_tlb_helper_locked(vtlb, &tmptlb); 1465a40ec84eSRichard Henderson qemu_spin_unlock(&env_tlb(env)->c.lock); 1466d9bb58e5SYang Zhong 146725d3ec58SRichard Henderson CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 146825d3ec58SRichard Henderson CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx]; 146925d3ec58SRichard Henderson CPUTLBEntryFull tmpf; 147025d3ec58SRichard Henderson tmpf = *f1; *f1 = *f2; *f2 = tmpf; 1471d9bb58e5SYang Zhong return true; 1472d9bb58e5SYang Zhong } 1473d9bb58e5SYang Zhong } 1474d9bb58e5SYang Zhong return false; 1475d9bb58e5SYang Zhong } 1476d9bb58e5SYang Zhong 1477707526adSRichard Henderson static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, 147825d3ec58SRichard Henderson CPUTLBEntryFull *full, uintptr_t retaddr) 1479707526adSRichard Henderson { 148025d3ec58SRichard Henderson ram_addr_t ram_addr = mem_vaddr + full->xlat_section; 1481707526adSRichard Henderson 1482707526adSRichard Henderson trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); 1483707526adSRichard Henderson 1484707526adSRichard Henderson if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { 1485f349e92eSPhilippe Mathieu-Daudé tb_invalidate_phys_range_fast(ram_addr, size, retaddr); 1486707526adSRichard Henderson } 1487707526adSRichard Henderson 1488707526adSRichard Henderson /* 1489707526adSRichard Henderson * Set both VGA and migration bits for simplicity and to remove 1490707526adSRichard Henderson * the notdirty callback faster. 1491707526adSRichard Henderson */ 1492707526adSRichard Henderson cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); 1493707526adSRichard Henderson 1494707526adSRichard Henderson /* We remove the notdirty callback only if the code has been flushed. 
*/ 1495707526adSRichard Henderson if (!cpu_physical_memory_is_clean(ram_addr)) { 1496707526adSRichard Henderson trace_memory_notdirty_set_dirty(mem_vaddr); 1497707526adSRichard Henderson tlb_set_dirty(cpu, mem_vaddr); 1498707526adSRichard Henderson } 1499707526adSRichard Henderson } 1500707526adSRichard Henderson 1501069cfe77SRichard Henderson static int probe_access_internal(CPUArchState *env, target_ulong addr, 1502069cfe77SRichard Henderson int fault_size, MMUAccessType access_type, 1503069cfe77SRichard Henderson int mmu_idx, bool nonfault, 1504af803a4fSRichard Henderson void **phost, CPUTLBEntryFull **pfull, 1505af803a4fSRichard Henderson uintptr_t retaddr) 1506d9bb58e5SYang Zhong { 1507383beda9SRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 1508383beda9SRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 15090b3c75adSRichard Henderson target_ulong tlb_addr = tlb_read_idx(entry, access_type); 15100b3c75adSRichard Henderson target_ulong page_addr = addr & TARGET_PAGE_MASK; 15110b3c75adSRichard Henderson int flags = TLB_FLAGS_MASK; 1512ca86cf32SDavid Hildenbrand 1513069cfe77SRichard Henderson if (!tlb_hit_page(tlb_addr, page_addr)) { 15140b3c75adSRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index, access_type, page_addr)) { 1515069cfe77SRichard Henderson CPUState *cs = env_cpu(env); 1516069cfe77SRichard Henderson 15178810ee2aSAlex Bennée if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type, 1518069cfe77SRichard Henderson mmu_idx, nonfault, retaddr)) { 1519069cfe77SRichard Henderson /* Non-faulting page table read failed. */ 1520069cfe77SRichard Henderson *phost = NULL; 1521af803a4fSRichard Henderson *pfull = NULL; 1522069cfe77SRichard Henderson return TLB_INVALID_MASK; 1523069cfe77SRichard Henderson } 1524069cfe77SRichard Henderson 152503a98189SDavid Hildenbrand /* TLB resize via tlb_fill may have moved the entry. */ 1526af803a4fSRichard Henderson index = tlb_index(env, mmu_idx, addr); 152703a98189SDavid Hildenbrand entry = tlb_entry(env, mmu_idx, addr); 1528c3c8bf57SRichard Henderson 1529c3c8bf57SRichard Henderson /* 1530c3c8bf57SRichard Henderson * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately, 1531c3c8bf57SRichard Henderson * to force the next access through tlb_fill. We've just 1532c3c8bf57SRichard Henderson * called tlb_fill, so we know that this entry *is* valid. 1533c3c8bf57SRichard Henderson */ 1534c3c8bf57SRichard Henderson flags &= ~TLB_INVALID_MASK; 1535d9bb58e5SYang Zhong } 15360b3c75adSRichard Henderson tlb_addr = tlb_read_idx(entry, access_type); 153703a98189SDavid Hildenbrand } 1538c3c8bf57SRichard Henderson flags &= tlb_addr; 153903a98189SDavid Hildenbrand 1540af803a4fSRichard Henderson *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 1541af803a4fSRichard Henderson 1542069cfe77SRichard Henderson /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ 1543069cfe77SRichard Henderson if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) { 1544069cfe77SRichard Henderson *phost = NULL; 1545069cfe77SRichard Henderson return TLB_MMIO; 1546fef39ccdSDavid Hildenbrand } 1547fef39ccdSDavid Hildenbrand 1548069cfe77SRichard Henderson /* Everything else is RAM. 
*/ 1549069cfe77SRichard Henderson *phost = (void *)((uintptr_t)addr + entry->addend); 1550069cfe77SRichard Henderson return flags; 1551069cfe77SRichard Henderson } 1552069cfe77SRichard Henderson 1553d507e6c5SRichard Henderson int probe_access_full(CPUArchState *env, target_ulong addr, int size, 1554069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, 1555af803a4fSRichard Henderson bool nonfault, void **phost, CPUTLBEntryFull **pfull, 1556af803a4fSRichard Henderson uintptr_t retaddr) 1557069cfe77SRichard Henderson { 1558d507e6c5SRichard Henderson int flags = probe_access_internal(env, addr, size, access_type, mmu_idx, 1559af803a4fSRichard Henderson nonfault, phost, pfull, retaddr); 1560069cfe77SRichard Henderson 1561069cfe77SRichard Henderson /* Handle clean RAM pages. */ 1562069cfe77SRichard Henderson if (unlikely(flags & TLB_NOTDIRTY)) { 1563af803a4fSRichard Henderson notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr); 1564069cfe77SRichard Henderson flags &= ~TLB_NOTDIRTY; 1565069cfe77SRichard Henderson } 1566069cfe77SRichard Henderson 1567069cfe77SRichard Henderson return flags; 1568069cfe77SRichard Henderson } 1569069cfe77SRichard Henderson 15701770b2f2SDaniel Henrique Barboza int probe_access_flags(CPUArchState *env, target_ulong addr, int size, 1571af803a4fSRichard Henderson MMUAccessType access_type, int mmu_idx, 1572af803a4fSRichard Henderson bool nonfault, void **phost, uintptr_t retaddr) 1573af803a4fSRichard Henderson { 1574af803a4fSRichard Henderson CPUTLBEntryFull *full; 15751770b2f2SDaniel Henrique Barboza int flags; 1576af803a4fSRichard Henderson 15771770b2f2SDaniel Henrique Barboza g_assert(-(addr | TARGET_PAGE_MASK) >= size); 15781770b2f2SDaniel Henrique Barboza 15791770b2f2SDaniel Henrique Barboza flags = probe_access_internal(env, addr, size, access_type, mmu_idx, 1580af803a4fSRichard Henderson nonfault, phost, &full, retaddr); 15811770b2f2SDaniel Henrique Barboza 15821770b2f2SDaniel Henrique Barboza /* Handle clean RAM pages. */ 15831770b2f2SDaniel Henrique Barboza if (unlikely(flags & TLB_NOTDIRTY)) { 15841770b2f2SDaniel Henrique Barboza notdirty_write(env_cpu(env), addr, 1, full, retaddr); 15851770b2f2SDaniel Henrique Barboza flags &= ~TLB_NOTDIRTY; 15861770b2f2SDaniel Henrique Barboza } 15871770b2f2SDaniel Henrique Barboza 15881770b2f2SDaniel Henrique Barboza return flags; 1589af803a4fSRichard Henderson } 1590af803a4fSRichard Henderson 1591069cfe77SRichard Henderson void *probe_access(CPUArchState *env, target_ulong addr, int size, 1592069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1593069cfe77SRichard Henderson { 1594af803a4fSRichard Henderson CPUTLBEntryFull *full; 1595069cfe77SRichard Henderson void *host; 1596069cfe77SRichard Henderson int flags; 1597069cfe77SRichard Henderson 1598069cfe77SRichard Henderson g_assert(-(addr | TARGET_PAGE_MASK) >= size); 1599069cfe77SRichard Henderson 1600069cfe77SRichard Henderson flags = probe_access_internal(env, addr, size, access_type, mmu_idx, 1601af803a4fSRichard Henderson false, &host, &full, retaddr); 1602069cfe77SRichard Henderson 1603069cfe77SRichard Henderson /* Per the interface, size == 0 merely faults the access. */ 1604069cfe77SRichard Henderson if (size == 0) { 160573bc0bd4SRichard Henderson return NULL; 160673bc0bd4SRichard Henderson } 160773bc0bd4SRichard Henderson 1608069cfe77SRichard Henderson if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { 160903a98189SDavid Hildenbrand /* Handle watchpoints. 
*/ 1610069cfe77SRichard Henderson if (flags & TLB_WATCHPOINT) { 1611069cfe77SRichard Henderson int wp_access = (access_type == MMU_DATA_STORE 1612069cfe77SRichard Henderson ? BP_MEM_WRITE : BP_MEM_READ); 161303a98189SDavid Hildenbrand cpu_check_watchpoint(env_cpu(env), addr, size, 161425d3ec58SRichard Henderson full->attrs, wp_access, retaddr); 1615d9bb58e5SYang Zhong } 1616fef39ccdSDavid Hildenbrand 161773bc0bd4SRichard Henderson /* Handle clean RAM pages. */ 1618069cfe77SRichard Henderson if (flags & TLB_NOTDIRTY) { 161925d3ec58SRichard Henderson notdirty_write(env_cpu(env), addr, 1, full, retaddr); 162073bc0bd4SRichard Henderson } 1621fef39ccdSDavid Hildenbrand } 1622fef39ccdSDavid Hildenbrand 1623069cfe77SRichard Henderson return host; 1624d9bb58e5SYang Zhong } 1625d9bb58e5SYang Zhong 16264811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, 16274811e909SRichard Henderson MMUAccessType access_type, int mmu_idx) 16284811e909SRichard Henderson { 1629af803a4fSRichard Henderson CPUTLBEntryFull *full; 1630069cfe77SRichard Henderson void *host; 1631069cfe77SRichard Henderson int flags; 16324811e909SRichard Henderson 1633069cfe77SRichard Henderson flags = probe_access_internal(env, addr, 0, access_type, 1634af803a4fSRichard Henderson mmu_idx, true, &host, &full, 0); 1635069cfe77SRichard Henderson 1636069cfe77SRichard Henderson /* The caller expects no flags to be set; fail if any are. */ 1637069cfe77SRichard Henderson return flags ? NULL : host; 16384811e909SRichard Henderson } 16394811e909SRichard Henderson 16407e0d9973SRichard Henderson /* 16417e0d9973SRichard Henderson * Return a ram_addr_t for the virtual address for execution. 16427e0d9973SRichard Henderson * 16437e0d9973SRichard Henderson * Return -1 if we can't translate and execute from an entire page 16447e0d9973SRichard Henderson * of RAM. This will force us to execute by loading and translating 16457e0d9973SRichard Henderson * one insn at a time, without caching. 16467e0d9973SRichard Henderson * 16477e0d9973SRichard Henderson * NOTE: This function will trigger an exception if the page is 16487e0d9973SRichard Henderson * not executable. 16497e0d9973SRichard Henderson */ 16507e0d9973SRichard Henderson tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, 16517e0d9973SRichard Henderson void **hostp) 16527e0d9973SRichard Henderson { 1653af803a4fSRichard Henderson CPUTLBEntryFull *full; 16547e0d9973SRichard Henderson void *p; 16557e0d9973SRichard Henderson 16567e0d9973SRichard Henderson (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH, 1657af803a4fSRichard Henderson cpu_mmu_index(env, true), false, &p, &full, 0); 16587e0d9973SRichard Henderson if (p == NULL) { 16597e0d9973SRichard Henderson return -1; 16607e0d9973SRichard Henderson } 1661ac01ec6fSWeiwei Li 1662ac01ec6fSWeiwei Li if (full->lg_page_size < TARGET_PAGE_BITS) { 1663ac01ec6fSWeiwei Li return -1; 1664ac01ec6fSWeiwei Li } 1665ac01ec6fSWeiwei Li 16667e0d9973SRichard Henderson if (hostp) { 16677e0d9973SRichard Henderson *hostp = p; 16687e0d9973SRichard Henderson } 16697e0d9973SRichard Henderson return qemu_ram_addr_from_host_nofail(p); 16707e0d9973SRichard Henderson } 16717e0d9973SRichard Henderson 1672cdfac37bSRichard Henderson /* Load/store with atomicity primitives. 
*/ 1673cdfac37bSRichard Henderson #include "ldst_atomicity.c.inc" 1674cdfac37bSRichard Henderson 1675235537faSAlex Bennée #ifdef CONFIG_PLUGIN 1676235537faSAlex Bennée /* 1677235537faSAlex Bennée * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. 1678235537faSAlex Bennée * This should be a hot path as we will have just looked this path up 1679235537faSAlex Bennée * in the softmmu lookup code (or helper). We don't handle re-fills or 1680235537faSAlex Bennée * checking the victim table. This is purely informational. 1681235537faSAlex Bennée * 16822f3a57eeSAlex Bennée * This almost never fails as the memory access being instrumented 16832f3a57eeSAlex Bennée * should have just filled the TLB. The one corner case is io_writex 16842f3a57eeSAlex Bennée * which can cause TLB flushes and potential resizing of the TLBs 1685570ef309SAlex Bennée * losing the information we need. In those cases we need to recover 168625d3ec58SRichard Henderson * data from a copy of the CPUTLBEntryFull. As long as this always occurs 1687570ef309SAlex Bennée * from the same thread (which a mem callback will be) this is safe. 1688235537faSAlex Bennée */ 1689235537faSAlex Bennée 1690235537faSAlex Bennée bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, 1691235537faSAlex Bennée bool is_store, struct qemu_plugin_hwaddr *data) 1692235537faSAlex Bennée { 1693235537faSAlex Bennée CPUArchState *env = cpu->env_ptr; 1694235537faSAlex Bennée CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); 1695235537faSAlex Bennée uintptr_t index = tlb_index(env, mmu_idx, addr); 1696235537faSAlex Bennée target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read; 1697235537faSAlex Bennée 1698235537faSAlex Bennée if (likely(tlb_hit(tlb_addr, addr))) { 1699235537faSAlex Bennée /* We must have an iotlb entry for MMIO */ 1700235537faSAlex Bennée if (tlb_addr & TLB_MMIO) { 170125d3ec58SRichard Henderson CPUTLBEntryFull *full; 170225d3ec58SRichard Henderson full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 1703235537faSAlex Bennée data->is_io = true; 170425d3ec58SRichard Henderson data->v.io.section = 170525d3ec58SRichard Henderson iotlb_to_section(cpu, full->xlat_section, full->attrs); 170625d3ec58SRichard Henderson data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr; 1707235537faSAlex Bennée } else { 1708235537faSAlex Bennée data->is_io = false; 17092d932039SAlex Bennée data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 1710235537faSAlex Bennée } 1711235537faSAlex Bennée return true; 17122f3a57eeSAlex Bennée } else { 17132f3a57eeSAlex Bennée SavedIOTLB *saved = &cpu->saved_iotlb; 17142f3a57eeSAlex Bennée data->is_io = true; 17152f3a57eeSAlex Bennée data->v.io.section = saved->section; 17162f3a57eeSAlex Bennée data->v.io.offset = saved->mr_offset; 17172f3a57eeSAlex Bennée return true; 1718235537faSAlex Bennée } 1719235537faSAlex Bennée } 1720235537faSAlex Bennée 1721235537faSAlex Bennée #endif 1722235537faSAlex Bennée 172308dff435SRichard Henderson /* 17248cfdacaaSRichard Henderson * Probe for a load/store operation. 17258cfdacaaSRichard Henderson * The host address and access flags are returned in MMULookupPageData. 
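 * A single access may span two pages; mmu_lookup() fills both page[]
 * slots of MMULookupLocals when it does, and leaves page[1].size zero
 * when it does not.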
17268cfdacaaSRichard Henderson */ 17278cfdacaaSRichard Henderson 17288cfdacaaSRichard Henderson typedef struct MMULookupPageData { 17298cfdacaaSRichard Henderson CPUTLBEntryFull *full; 17308cfdacaaSRichard Henderson void *haddr; 17318cfdacaaSRichard Henderson target_ulong addr; 17328cfdacaaSRichard Henderson int flags; 17338cfdacaaSRichard Henderson int size; 17348cfdacaaSRichard Henderson } MMULookupPageData; 17358cfdacaaSRichard Henderson 17368cfdacaaSRichard Henderson typedef struct MMULookupLocals { 17378cfdacaaSRichard Henderson MMULookupPageData page[2]; 17388cfdacaaSRichard Henderson MemOp memop; 17398cfdacaaSRichard Henderson int mmu_idx; 17408cfdacaaSRichard Henderson } MMULookupLocals; 17418cfdacaaSRichard Henderson 17428cfdacaaSRichard Henderson /** 17438cfdacaaSRichard Henderson * mmu_lookup1: translate one page 17448cfdacaaSRichard Henderson * @env: cpu context 17458cfdacaaSRichard Henderson * @data: lookup parameters 17468cfdacaaSRichard Henderson * @mmu_idx: virtual address context 17478cfdacaaSRichard Henderson * @access_type: load/store/code 17488cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 17498cfdacaaSRichard Henderson * 17508cfdacaaSRichard Henderson * Resolve the translation for the one page at @data.addr, filling in 17518cfdacaaSRichard Henderson * the rest of @data with the results. If the translation fails, 17528cfdacaaSRichard Henderson * tlb_fill will longjmp out. Return true if the softmmu tlb for 17538cfdacaaSRichard Henderson * @mmu_idx may have resized. 17548cfdacaaSRichard Henderson */ 17558cfdacaaSRichard Henderson static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data, 17568cfdacaaSRichard Henderson int mmu_idx, MMUAccessType access_type, uintptr_t ra) 17578cfdacaaSRichard Henderson { 17588cfdacaaSRichard Henderson target_ulong addr = data->addr; 17598cfdacaaSRichard Henderson uintptr_t index = tlb_index(env, mmu_idx, addr); 17608cfdacaaSRichard Henderson CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); 17618cfdacaaSRichard Henderson target_ulong tlb_addr = tlb_read_idx(entry, access_type); 17628cfdacaaSRichard Henderson bool maybe_resized = false; 17638cfdacaaSRichard Henderson 17648cfdacaaSRichard Henderson /* If the TLB entry is for a different page, reload and try again. */ 17658cfdacaaSRichard Henderson if (!tlb_hit(tlb_addr, addr)) { 17668cfdacaaSRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index, access_type, 17678cfdacaaSRichard Henderson addr & TARGET_PAGE_MASK)) { 17688cfdacaaSRichard Henderson tlb_fill(env_cpu(env), addr, data->size, access_type, mmu_idx, ra); 17698cfdacaaSRichard Henderson maybe_resized = true; 17708cfdacaaSRichard Henderson index = tlb_index(env, mmu_idx, addr); 17718cfdacaaSRichard Henderson entry = tlb_entry(env, mmu_idx, addr); 17728cfdacaaSRichard Henderson } 17738cfdacaaSRichard Henderson tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK; 17748cfdacaaSRichard Henderson } 17758cfdacaaSRichard Henderson 17768cfdacaaSRichard Henderson data->flags = tlb_addr & TLB_FLAGS_MASK; 17778cfdacaaSRichard Henderson data->full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 17788cfdacaaSRichard Henderson /* Compute haddr speculatively; depending on flags it might be invalid. 
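In particular, a TLB_MMIO page has no host mapping (tlb_set_page_full stored an addend of zero for I/O), so haddr must not be dereferenced in that case.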
*/ 17798cfdacaaSRichard Henderson data->haddr = (void *)((uintptr_t)addr + entry->addend); 17808cfdacaaSRichard Henderson 17818cfdacaaSRichard Henderson return maybe_resized; 17828cfdacaaSRichard Henderson } 17838cfdacaaSRichard Henderson 17848cfdacaaSRichard Henderson /** 17858cfdacaaSRichard Henderson * mmu_watch_or_dirty 17868cfdacaaSRichard Henderson * @env: cpu context 17878cfdacaaSRichard Henderson * @data: lookup parameters 17888cfdacaaSRichard Henderson * @access_type: load/store/code 17898cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 17908cfdacaaSRichard Henderson * 17918cfdacaaSRichard Henderson * Trigger watchpoints for @data.addr:@data.size; 17928cfdacaaSRichard Henderson * record writes to protected clean pages. 17938cfdacaaSRichard Henderson */ 17948cfdacaaSRichard Henderson static void mmu_watch_or_dirty(CPUArchState *env, MMULookupPageData *data, 17958cfdacaaSRichard Henderson MMUAccessType access_type, uintptr_t ra) 17968cfdacaaSRichard Henderson { 17978cfdacaaSRichard Henderson CPUTLBEntryFull *full = data->full; 17988cfdacaaSRichard Henderson target_ulong addr = data->addr; 17998cfdacaaSRichard Henderson int flags = data->flags; 18008cfdacaaSRichard Henderson int size = data->size; 18018cfdacaaSRichard Henderson 18028cfdacaaSRichard Henderson /* On watchpoint hit, this will longjmp out. */ 18038cfdacaaSRichard Henderson if (flags & TLB_WATCHPOINT) { 18048cfdacaaSRichard Henderson int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ; 18058cfdacaaSRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs, wp, ra); 18068cfdacaaSRichard Henderson flags &= ~TLB_WATCHPOINT; 18078cfdacaaSRichard Henderson } 18088cfdacaaSRichard Henderson 18098cfdacaaSRichard Henderson /* Note that notdirty is only set for writes. */ 18108cfdacaaSRichard Henderson if (flags & TLB_NOTDIRTY) { 18118cfdacaaSRichard Henderson notdirty_write(env_cpu(env), addr, size, full, ra); 18128cfdacaaSRichard Henderson flags &= ~TLB_NOTDIRTY; 18138cfdacaaSRichard Henderson } 18148cfdacaaSRichard Henderson data->flags = flags; 18158cfdacaaSRichard Henderson } 18168cfdacaaSRichard Henderson 18178cfdacaaSRichard Henderson /** 18188cfdacaaSRichard Henderson * mmu_lookup: translate page(s) 18198cfdacaaSRichard Henderson * @env: cpu context 18208cfdacaaSRichard Henderson * @addr: virtual address 18218cfdacaaSRichard Henderson * @oi: combined mmu_idx and MemOp 18228cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 18238cfdacaaSRichard Henderson * @access_type: load/store/code 18248cfdacaaSRichard Henderson * @l: output result 18258cfdacaaSRichard Henderson * 18268cfdacaaSRichard Henderson * Resolve the translation for the page(s) beginning at @addr, for MemOp.size 18278cfdacaaSRichard Henderson * bytes. Return true if the lookup crosses a page boundary. 
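 *
 * For example (illustrative numbers), with 4K pages a 4-byte access
 * at addr 0x1ffe yields page[0] = { .addr = 0x1ffe, .size = 2 } and
 * page[1] = { .addr = 0x2000, .size = 2 }.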
18288cfdacaaSRichard Henderson */ 18298cfdacaaSRichard Henderson static bool mmu_lookup(CPUArchState *env, target_ulong addr, MemOpIdx oi, 18308cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType type, MMULookupLocals *l) 18318cfdacaaSRichard Henderson { 18328cfdacaaSRichard Henderson unsigned a_bits; 18338cfdacaaSRichard Henderson bool crosspage; 18348cfdacaaSRichard Henderson int flags; 18358cfdacaaSRichard Henderson 18368cfdacaaSRichard Henderson l->memop = get_memop(oi); 18378cfdacaaSRichard Henderson l->mmu_idx = get_mmuidx(oi); 18388cfdacaaSRichard Henderson 18398cfdacaaSRichard Henderson tcg_debug_assert(l->mmu_idx < NB_MMU_MODES); 18408cfdacaaSRichard Henderson 18418cfdacaaSRichard Henderson /* Handle CPU specific unaligned behaviour */ 18428cfdacaaSRichard Henderson a_bits = get_alignment_bits(l->memop); 18438cfdacaaSRichard Henderson if (addr & ((1 << a_bits) - 1)) { 18448cfdacaaSRichard Henderson cpu_unaligned_access(env_cpu(env), addr, type, l->mmu_idx, ra); 18458cfdacaaSRichard Henderson } 18468cfdacaaSRichard Henderson 18478cfdacaaSRichard Henderson l->page[0].addr = addr; 18488cfdacaaSRichard Henderson l->page[0].size = memop_size(l->memop); 18498cfdacaaSRichard Henderson l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK; 18508cfdacaaSRichard Henderson l->page[1].size = 0; 18518cfdacaaSRichard Henderson crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK; 18528cfdacaaSRichard Henderson 18538cfdacaaSRichard Henderson if (likely(!crosspage)) { 18548cfdacaaSRichard Henderson mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra); 18558cfdacaaSRichard Henderson 18568cfdacaaSRichard Henderson flags = l->page[0].flags; 18578cfdacaaSRichard Henderson if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) { 18588cfdacaaSRichard Henderson mmu_watch_or_dirty(env, &l->page[0], type, ra); 18598cfdacaaSRichard Henderson } 18608cfdacaaSRichard Henderson if (unlikely(flags & TLB_BSWAP)) { 18618cfdacaaSRichard Henderson l->memop ^= MO_BSWAP; 18628cfdacaaSRichard Henderson } 18638cfdacaaSRichard Henderson } else { 18648cfdacaaSRichard Henderson /* Finish compute of page crossing. */ 18658cfdacaaSRichard Henderson int size0 = l->page[1].addr - addr; 18668cfdacaaSRichard Henderson l->page[1].size = l->page[0].size - size0; 18678cfdacaaSRichard Henderson l->page[0].size = size0; 18688cfdacaaSRichard Henderson 18698cfdacaaSRichard Henderson /* 18708cfdacaaSRichard Henderson * Lookup both pages, recognizing exceptions from either. If the 18718cfdacaaSRichard Henderson * second lookup potentially resized, refresh first CPUTLBEntryFull. 
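 * (A resize reallocates the dynamic TLB, so the fulltlb pointer
 * cached by the first lookup may be stale; compare the note above
 * tlb_fill().)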
18728cfdacaaSRichard Henderson */ 18738cfdacaaSRichard Henderson mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra); 18748cfdacaaSRichard Henderson if (mmu_lookup1(env, &l->page[1], l->mmu_idx, type, ra)) { 18758cfdacaaSRichard Henderson uintptr_t index = tlb_index(env, l->mmu_idx, addr); 18768cfdacaaSRichard Henderson l->page[0].full = &env_tlb(env)->d[l->mmu_idx].fulltlb[index]; 18778cfdacaaSRichard Henderson } 18788cfdacaaSRichard Henderson 18798cfdacaaSRichard Henderson flags = l->page[0].flags | l->page[1].flags; 18808cfdacaaSRichard Henderson if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) { 18818cfdacaaSRichard Henderson mmu_watch_or_dirty(env, &l->page[0], type, ra); 18828cfdacaaSRichard Henderson mmu_watch_or_dirty(env, &l->page[1], type, ra); 18838cfdacaaSRichard Henderson } 18848cfdacaaSRichard Henderson 18858cfdacaaSRichard Henderson /* 18868cfdacaaSRichard Henderson * Since target/sparc is the only user of TLB_BSWAP, and all 18878cfdacaaSRichard Henderson * Sparc accesses are aligned, any treatment across two pages 18888cfdacaaSRichard Henderson * would be arbitrary. Refuse it until there's a use. 18898cfdacaaSRichard Henderson */ 18908cfdacaaSRichard Henderson tcg_debug_assert((flags & TLB_BSWAP) == 0); 18918cfdacaaSRichard Henderson } 18928cfdacaaSRichard Henderson 18938cfdacaaSRichard Henderson return crosspage; 18948cfdacaaSRichard Henderson } 18958cfdacaaSRichard Henderson 18968cfdacaaSRichard Henderson /* 189708dff435SRichard Henderson * Probe for an atomic operation. Do not allow unaligned operations, 189808dff435SRichard Henderson * or io operations to proceed. Return the host address. 189908dff435SRichard Henderson */ 1900d9bb58e5SYang Zhong static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, 19017bedee32SRichard Henderson MemOpIdx oi, int size, uintptr_t retaddr) 1902d9bb58e5SYang Zhong { 1903b826044fSRichard Henderson uintptr_t mmu_idx = get_mmuidx(oi); 190414776ab5STony Nguyen MemOp mop = get_memop(oi); 1905d9bb58e5SYang Zhong int a_bits = get_alignment_bits(mop); 190608dff435SRichard Henderson uintptr_t index; 190708dff435SRichard Henderson CPUTLBEntry *tlbe; 190808dff435SRichard Henderson target_ulong tlb_addr; 190934d49937SPeter Maydell void *hostaddr; 1910417aeaffSRichard Henderson CPUTLBEntryFull *full; 1911d9bb58e5SYang Zhong 1912b826044fSRichard Henderson tcg_debug_assert(mmu_idx < NB_MMU_MODES); 1913b826044fSRichard Henderson 1914d9bb58e5SYang Zhong /* Adjust the given return address. */ 1915d9bb58e5SYang Zhong retaddr -= GETPC_ADJ; 1916d9bb58e5SYang Zhong 1917d9bb58e5SYang Zhong /* Enforce guest required alignment. */ 1918d9bb58e5SYang Zhong if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { 1919d9bb58e5SYang Zhong /* ??? Maybe indicate atomic op to cpu_unaligned_access */ 192029a0af61SRichard Henderson cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 1921d9bb58e5SYang Zhong mmu_idx, retaddr); 1922d9bb58e5SYang Zhong } 1923d9bb58e5SYang Zhong 1924d9bb58e5SYang Zhong /* Enforce qemu required alignment. */ 192508dff435SRichard Henderson if (unlikely(addr & (size - 1))) { 1926d9bb58e5SYang Zhong /* We get here if guest alignment was not requested, 1927d9bb58e5SYang Zhong or was not enforced by cpu_unaligned_access above. 1928d9bb58e5SYang Zhong We might widen the access and emulate, but for now 1929d9bb58e5SYang Zhong mark an exception and exit the cpu loop. 
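A misaligned operation cannot be performed with a single host atomic instruction, hence the stop-the-world fallback.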
*/ 1930d9bb58e5SYang Zhong goto stop_the_world; 1931d9bb58e5SYang Zhong } 1932d9bb58e5SYang Zhong 193308dff435SRichard Henderson index = tlb_index(env, mmu_idx, addr); 193408dff435SRichard Henderson tlbe = tlb_entry(env, mmu_idx, addr); 193508dff435SRichard Henderson 1936d9bb58e5SYang Zhong /* Check TLB entry and enforce page permissions. */ 193708dff435SRichard Henderson tlb_addr = tlb_addr_write(tlbe); 1938334692bcSPeter Maydell if (!tlb_hit(tlb_addr, addr)) { 19390b3c75adSRichard Henderson if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_STORE, 19400b3c75adSRichard Henderson addr & TARGET_PAGE_MASK)) { 194108dff435SRichard Henderson tlb_fill(env_cpu(env), addr, size, 194208dff435SRichard Henderson MMU_DATA_STORE, mmu_idx, retaddr); 19436d967cb8SEmilio G. Cota index = tlb_index(env, mmu_idx, addr); 19446d967cb8SEmilio G. Cota tlbe = tlb_entry(env, mmu_idx, addr); 1945d9bb58e5SYang Zhong } 1946403f290cSEmilio G. Cota tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; 1947d9bb58e5SYang Zhong } 1948d9bb58e5SYang Zhong 1949417aeaffSRichard Henderson /* 1950417aeaffSRichard Henderson * Let the guest notice RMW on a write-only page. 1951417aeaffSRichard Henderson * We have just verified that the page is writable. 1952417aeaffSRichard Henderson * Subpage lookups may have left TLB_INVALID_MASK set, 1953417aeaffSRichard Henderson * but addr_read will only be -1 if PAGE_READ was unset. 1954417aeaffSRichard Henderson */ 1955417aeaffSRichard Henderson if (unlikely(tlbe->addr_read == -1)) { 19567bedee32SRichard Henderson tlb_fill(env_cpu(env), addr, size, MMU_DATA_LOAD, mmu_idx, retaddr); 195708dff435SRichard Henderson /* 1958417aeaffSRichard Henderson * Since we don't support reads and writes to different 1959417aeaffSRichard Henderson * addresses, and we do have the proper page loaded for 1960417aeaffSRichard Henderson * write, this shouldn't ever return. But just in case, 1961417aeaffSRichard Henderson * handle via stop-the-world. 196208dff435SRichard Henderson */ 196308dff435SRichard Henderson goto stop_the_world; 196408dff435SRichard Henderson } 1965417aeaffSRichard Henderson /* Collect TLB_WATCHPOINT for read. */ 1966417aeaffSRichard Henderson tlb_addr |= tlbe->addr_read; 196708dff435SRichard Henderson 196855df6fcfSPeter Maydell /* Notice an IO access or a needs-MMU-lookup access */ 19690953674eSRichard Henderson if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) { 1970d9bb58e5SYang Zhong /* There's really nothing that can be done to 1971d9bb58e5SYang Zhong support this apart from stop-the-world. 
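Raising EXCP_ATOMIC via cpu_loop_exit_atomic() re-executes the operation exclusively, with all other vCPUs halted.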
*/ 1972d9bb58e5SYang Zhong goto stop_the_world; 1973d9bb58e5SYang Zhong } 1974d9bb58e5SYang Zhong 197534d49937SPeter Maydell hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 1976417aeaffSRichard Henderson full = &env_tlb(env)->d[mmu_idx].fulltlb[index]; 197734d49937SPeter Maydell 197834d49937SPeter Maydell if (unlikely(tlb_addr & TLB_NOTDIRTY)) { 1979417aeaffSRichard Henderson notdirty_write(env_cpu(env), addr, size, full, retaddr); 1980417aeaffSRichard Henderson } 1981417aeaffSRichard Henderson 1982417aeaffSRichard Henderson if (unlikely(tlb_addr & TLB_WATCHPOINT)) { 19837bedee32SRichard Henderson cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs, 19847bedee32SRichard Henderson BP_MEM_READ | BP_MEM_WRITE, retaddr); 198534d49937SPeter Maydell } 198634d49937SPeter Maydell 198734d49937SPeter Maydell return hostaddr; 1988d9bb58e5SYang Zhong 1989d9bb58e5SYang Zhong stop_the_world: 199029a0af61SRichard Henderson cpu_loop_exit_atomic(env_cpu(env), retaddr); 1991d9bb58e5SYang Zhong } 1992d9bb58e5SYang Zhong 1993eed56642SAlex Bennée /* 1994eed56642SAlex Bennée * Load Helpers 1995eed56642SAlex Bennée * 1996eed56642SAlex Bennée * We support two different access types. SOFTMMU_CODE_ACCESS is 1997eed56642SAlex Bennée * specifically for reading instructions from system memory. It is 1998eed56642SAlex Bennée * called by the translation loop and in some helpers where the code 1999eed56642SAlex Bennée * is disassembled. It shouldn't be called directly by guest code. 2000cdfac37bSRichard Henderson * 2001eed56642SAlex Bennée * For the benefit of TCG generated code, we want to avoid the 2002eed56642SAlex Bennée * complication of ABI-specific return type promotion and always 2003eed56642SAlex Bennée * return a value extended to the register size of the host. This is 2004eed56642SAlex Bennée * tcg_target_long, except in the case of a 32-bit host and 64-bit 2005eed56642SAlex Bennée * data, and for that we always have uint64_t. 2006eed56642SAlex Bennée * 2007eed56642SAlex Bennée * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. 2008eed56642SAlex Bennée */ 2009eed56642SAlex Bennée 20108cfdacaaSRichard Henderson /** 20118cfdacaaSRichard Henderson * do_ld_mmio_beN: 20128cfdacaaSRichard Henderson * @env: cpu context 20138cfdacaaSRichard Henderson * @p: translation parameters 20148cfdacaaSRichard Henderson * @ret_be: accumulated data 20158cfdacaaSRichard Henderson * @mmu_idx: virtual address context 20168cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 20178cfdacaaSRichard Henderson * 20188cfdacaaSRichard Henderson * Load @p->size bytes from @p->addr, which is memory-mapped i/o. 20198cfdacaaSRichard Henderson * The bytes are concatenated in big-endian order with @ret_be. 
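 *
 * For example, two bytes 0x12 then 0x34 read from i/o produce
 * (ret_be << 16) | 0x1234.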
20208cfdacaaSRichard Henderson */ 20218cfdacaaSRichard Henderson static uint64_t do_ld_mmio_beN(CPUArchState *env, MMULookupPageData *p, 20228cfdacaaSRichard Henderson uint64_t ret_be, int mmu_idx, 20238cfdacaaSRichard Henderson MMUAccessType type, uintptr_t ra) 20242dd92606SRichard Henderson { 20258cfdacaaSRichard Henderson CPUTLBEntryFull *full = p->full; 20268cfdacaaSRichard Henderson target_ulong addr = p->addr; 20278cfdacaaSRichard Henderson int i, size = p->size; 20288cfdacaaSRichard Henderson 20298cfdacaaSRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 20308cfdacaaSRichard Henderson for (i = 0; i < size; i++) { 20318cfdacaaSRichard Henderson uint8_t x = io_readx(env, full, mmu_idx, addr + i, ra, type, MO_UB); 20328cfdacaaSRichard Henderson ret_be = (ret_be << 8) | x; 20338cfdacaaSRichard Henderson } 20348cfdacaaSRichard Henderson return ret_be; 20358cfdacaaSRichard Henderson } 20368cfdacaaSRichard Henderson 20378cfdacaaSRichard Henderson /** 20388cfdacaaSRichard Henderson * do_ld_bytes_beN 20398cfdacaaSRichard Henderson * @p: translation parameters 20408cfdacaaSRichard Henderson * @ret_be: accumulated data 20418cfdacaaSRichard Henderson * 20428cfdacaaSRichard Henderson * Load @p->size bytes from @p->haddr, which is RAM. 20438cfdacaaSRichard Henderson * The bytes are concatenated in big-endian order with @ret_be. 20448cfdacaaSRichard Henderson */ 20458cfdacaaSRichard Henderson static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be) 20468cfdacaaSRichard Henderson { 20478cfdacaaSRichard Henderson uint8_t *haddr = p->haddr; 20488cfdacaaSRichard Henderson int i, size = p->size; 20498cfdacaaSRichard Henderson 20508cfdacaaSRichard Henderson for (i = 0; i < size; i++) { 20518cfdacaaSRichard Henderson ret_be = (ret_be << 8) | haddr[i]; 20528cfdacaaSRichard Henderson } 20538cfdacaaSRichard Henderson return ret_be; 20548cfdacaaSRichard Henderson } 20558cfdacaaSRichard Henderson 2056cdfac37bSRichard Henderson /** 2057cdfac37bSRichard Henderson * do_ld_parts_beN 2058cdfac37bSRichard Henderson * @p: translation parameters 2059cdfac37bSRichard Henderson * @ret_be: accumulated data 2060cdfac37bSRichard Henderson * 2061cdfac37bSRichard Henderson * As do_ld_bytes_beN, but atomically on each aligned part. 2062cdfac37bSRichard Henderson */ 2063cdfac37bSRichard Henderson static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be) 2064cdfac37bSRichard Henderson { 2065cdfac37bSRichard Henderson void *haddr = p->haddr; 2066cdfac37bSRichard Henderson int size = p->size; 2067cdfac37bSRichard Henderson 2068cdfac37bSRichard Henderson do { 2069cdfac37bSRichard Henderson uint64_t x; 2070cdfac37bSRichard Henderson int n; 2071cdfac37bSRichard Henderson 2072cdfac37bSRichard Henderson /* 2073cdfac37bSRichard Henderson * Find minimum of alignment and size. 2074cdfac37bSRichard Henderson * This is slightly stronger than required by MO_ATOM_SUBALIGN, which 2075cdfac37bSRichard Henderson * would have only checked the low bits of addr|size once at the start, 2076cdfac37bSRichard Henderson * but is just as easy. 
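 * For example, 6 bytes at a host address with (addr & 7) == 6 are
 * loaded as an atomic 2-byte piece (reaching 8-byte alignment)
 * followed by an atomic 4-byte piece.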
2077cdfac37bSRichard Henderson */ 2078cdfac37bSRichard Henderson switch (((uintptr_t)haddr | size) & 7) { 2079cdfac37bSRichard Henderson case 4: 2080cdfac37bSRichard Henderson x = cpu_to_be32(load_atomic4(haddr)); 2081cdfac37bSRichard Henderson ret_be = (ret_be << 32) | x; 2082cdfac37bSRichard Henderson n = 4; 2083cdfac37bSRichard Henderson break; 2084cdfac37bSRichard Henderson case 2: 2085cdfac37bSRichard Henderson case 6: 2086cdfac37bSRichard Henderson x = cpu_to_be16(load_atomic2(haddr)); 2087cdfac37bSRichard Henderson ret_be = (ret_be << 16) | x; 2088cdfac37bSRichard Henderson n = 2; 2089cdfac37bSRichard Henderson break; 2090cdfac37bSRichard Henderson default: 2091cdfac37bSRichard Henderson x = *(uint8_t *)haddr; 2092cdfac37bSRichard Henderson ret_be = (ret_be << 8) | x; 2093cdfac37bSRichard Henderson n = 1; 2094cdfac37bSRichard Henderson break; 2095cdfac37bSRichard Henderson case 0: 2096cdfac37bSRichard Henderson g_assert_not_reached(); 2097cdfac37bSRichard Henderson } 2098cdfac37bSRichard Henderson haddr += n; 2099cdfac37bSRichard Henderson size -= n; 2100cdfac37bSRichard Henderson } while (size != 0); 2101cdfac37bSRichard Henderson return ret_be; 2102cdfac37bSRichard Henderson } 2103cdfac37bSRichard Henderson 2104cdfac37bSRichard Henderson /** 2105cdfac37bSRichard Henderson * do_ld_whole_be4 2106cdfac37bSRichard Henderson * @p: translation parameters 2107cdfac37bSRichard Henderson * @ret_be: accumulated data 2108cdfac37bSRichard Henderson * 2109cdfac37bSRichard Henderson * As do_ld_bytes_beN, but with one atomic load. 2110cdfac37bSRichard Henderson * Four aligned bytes are guaranteed to cover the load. 2111cdfac37bSRichard Henderson */ 2112cdfac37bSRichard Henderson static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be) 2113cdfac37bSRichard Henderson { 2114cdfac37bSRichard Henderson int o = p->addr & 3; 2115cdfac37bSRichard Henderson uint32_t x = load_atomic4(p->haddr - o); 2116cdfac37bSRichard Henderson 2117cdfac37bSRichard Henderson x = cpu_to_be32(x); 2118cdfac37bSRichard Henderson x <<= o * 8; 2119cdfac37bSRichard Henderson x >>= (4 - p->size) * 8; 2120cdfac37bSRichard Henderson return (ret_be << (p->size * 8)) | x; 2121cdfac37bSRichard Henderson } 2122cdfac37bSRichard Henderson 2123cdfac37bSRichard Henderson /** 2124cdfac37bSRichard Henderson * do_ld_whole_be8 2125cdfac37bSRichard Henderson * @p: translation parameters 2126cdfac37bSRichard Henderson * @ret_be: accumulated data 2127cdfac37bSRichard Henderson * 2128cdfac37bSRichard Henderson * As do_ld_bytes_beN, but with one atomic load. 2129cdfac37bSRichard Henderson * Eight aligned bytes are guaranteed to cover the load.
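 *
 * For example, with p->addr & 7 == 5 and p->size == 3, the aligned
 * 8-byte load covers big-endian bytes 0-7; x <<= 40 discards the five
 * leading bytes and x >>= 40 right-justifies the three wanted ones.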
2130cdfac37bSRichard Henderson */ 2131cdfac37bSRichard Henderson static uint64_t do_ld_whole_be8(CPUArchState *env, uintptr_t ra, 2132cdfac37bSRichard Henderson MMULookupPageData *p, uint64_t ret_be) 2133cdfac37bSRichard Henderson { 2134cdfac37bSRichard Henderson int o = p->addr & 7; 2135cdfac37bSRichard Henderson uint64_t x = load_atomic8_or_exit(env, ra, p->haddr - o); 2136cdfac37bSRichard Henderson 2137cdfac37bSRichard Henderson x = cpu_to_be64(x); 2138cdfac37bSRichard Henderson x <<= o * 8; 2139cdfac37bSRichard Henderson x >>= (8 - p->size) * 8; 2140cdfac37bSRichard Henderson return (ret_be << (p->size * 8)) | x; 2141cdfac37bSRichard Henderson } 2142cdfac37bSRichard Henderson 214335c653c4SRichard Henderson /** 214435c653c4SRichard Henderson * do_ld_whole_be16 214535c653c4SRichard Henderson * @p: translation parameters 214635c653c4SRichard Henderson * @ret_be: accumulated data 214735c653c4SRichard Henderson * 214835c653c4SRichard Henderson * As do_ld_bytes_beN, but with one atomic load. 214935c653c4SRichard Henderson * 16 aligned bytes are guaranteed to cover the load. 215035c653c4SRichard Henderson */ 215135c653c4SRichard Henderson static Int128 do_ld_whole_be16(CPUArchState *env, uintptr_t ra, 215235c653c4SRichard Henderson MMULookupPageData *p, uint64_t ret_be) 215335c653c4SRichard Henderson { 215435c653c4SRichard Henderson int o = p->addr & 15; 215535c653c4SRichard Henderson Int128 x, y = load_atomic16_or_exit(env, ra, p->haddr - o); 215635c653c4SRichard Henderson int size = p->size; 215735c653c4SRichard Henderson 215835c653c4SRichard Henderson if (!HOST_BIG_ENDIAN) { 215935c653c4SRichard Henderson y = bswap128(y); 216035c653c4SRichard Henderson } 216135c653c4SRichard Henderson y = int128_lshift(y, o * 8); 216235c653c4SRichard Henderson y = int128_urshift(y, (16 - size) * 8); 216335c653c4SRichard Henderson x = int128_make64(ret_be); 216435c653c4SRichard Henderson x = int128_lshift(x, size * 8); 216535c653c4SRichard Henderson return int128_or(x, y); 216635c653c4SRichard Henderson } 216735c653c4SRichard Henderson 21688cfdacaaSRichard Henderson /* 21698cfdacaaSRichard Henderson * Wrapper for the above. 21708cfdacaaSRichard Henderson */ 21718cfdacaaSRichard Henderson static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p, 2172cdfac37bSRichard Henderson uint64_t ret_be, int mmu_idx, MMUAccessType type, 2173cdfac37bSRichard Henderson MemOp mop, uintptr_t ra) 21748cfdacaaSRichard Henderson { 2175cdfac37bSRichard Henderson MemOp atom; 2176cdfac37bSRichard Henderson unsigned tmp, half_size; 2177cdfac37bSRichard Henderson 21788cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 21798cfdacaaSRichard Henderson return do_ld_mmio_beN(env, p, ret_be, mmu_idx, type, ra); 2180cdfac37bSRichard Henderson } 2181cdfac37bSRichard Henderson 2182cdfac37bSRichard Henderson /* 2183cdfac37bSRichard Henderson * It is a given that we cross a page and therefore there is no 2184cdfac37bSRichard Henderson * atomicity for the load as a whole, but subobjects may need attention. 2185cdfac37bSRichard Henderson */ 2186cdfac37bSRichard Henderson atom = mop & MO_ATOM_MASK; 2187cdfac37bSRichard Henderson switch (atom) { 2188cdfac37bSRichard Henderson case MO_ATOM_SUBALIGN: 2189cdfac37bSRichard Henderson return do_ld_parts_beN(p, ret_be); 2190cdfac37bSRichard Henderson 2191cdfac37bSRichard Henderson case MO_ATOM_IFALIGN_PAIR: 2192cdfac37bSRichard Henderson case MO_ATOM_WITHIN16_PAIR: 2193cdfac37bSRichard Henderson tmp = mop & MO_SIZE; 2194cdfac37bSRichard Henderson tmp = tmp ?
tmp - 1 : 0; 2195cdfac37bSRichard Henderson half_size = 1 << tmp; 2196cdfac37bSRichard Henderson if (atom == MO_ATOM_IFALIGN_PAIR 2197cdfac37bSRichard Henderson ? p->size == half_size 2198cdfac37bSRichard Henderson : p->size >= half_size) { 2199cdfac37bSRichard Henderson if (!HAVE_al8_fast && p->size < 4) { 2200cdfac37bSRichard Henderson return do_ld_whole_be4(p, ret_be); 22018cfdacaaSRichard Henderson } else { 2202cdfac37bSRichard Henderson return do_ld_whole_be8(env, ra, p, ret_be); 2203cdfac37bSRichard Henderson } 2204cdfac37bSRichard Henderson } 2205cdfac37bSRichard Henderson /* fall through */ 2206cdfac37bSRichard Henderson 2207cdfac37bSRichard Henderson case MO_ATOM_IFALIGN: 2208cdfac37bSRichard Henderson case MO_ATOM_WITHIN16: 2209cdfac37bSRichard Henderson case MO_ATOM_NONE: 22108cfdacaaSRichard Henderson return do_ld_bytes_beN(p, ret_be); 2211cdfac37bSRichard Henderson 2212cdfac37bSRichard Henderson default: 2213cdfac37bSRichard Henderson g_assert_not_reached(); 22148cfdacaaSRichard Henderson } 22158cfdacaaSRichard Henderson } 22168cfdacaaSRichard Henderson 221735c653c4SRichard Henderson /* 221835c653c4SRichard Henderson * Wrapper for the above, for 8 < size < 16. 221935c653c4SRichard Henderson */ 222035c653c4SRichard Henderson static Int128 do_ld16_beN(CPUArchState *env, MMULookupPageData *p, 222135c653c4SRichard Henderson uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra) 222235c653c4SRichard Henderson { 222335c653c4SRichard Henderson int size = p->size; 222435c653c4SRichard Henderson uint64_t b; 222535c653c4SRichard Henderson MemOp atom; 222635c653c4SRichard Henderson 222735c653c4SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 222835c653c4SRichard Henderson p->size = size - 8; 222935c653c4SRichard Henderson a = do_ld_mmio_beN(env, p, a, mmu_idx, MMU_DATA_LOAD, ra); 223035c653c4SRichard Henderson p->addr += p->size; 223135c653c4SRichard Henderson p->size = 8; 223235c653c4SRichard Henderson b = do_ld_mmio_beN(env, p, 0, mmu_idx, MMU_DATA_LOAD, ra); 223335c653c4SRichard Henderson return int128_make128(b, a); 223435c653c4SRichard Henderson } 223535c653c4SRichard Henderson 223635c653c4SRichard Henderson /* 223735c653c4SRichard Henderson * It is a given that we cross a page and therefore there is no 223835c653c4SRichard Henderson * atomicity for the load as a whole, but subobjects may need attention. 223935c653c4SRichard Henderson */ 224035c653c4SRichard Henderson atom = mop & MO_ATOM_MASK; 224135c653c4SRichard Henderson switch (atom) { 224235c653c4SRichard Henderson case MO_ATOM_SUBALIGN: 224335c653c4SRichard Henderson p->size = size - 8; 224435c653c4SRichard Henderson a = do_ld_parts_beN(p, a); 224535c653c4SRichard Henderson p->haddr += size - 8; 224635c653c4SRichard Henderson p->size = 8; 224735c653c4SRichard Henderson b = do_ld_parts_beN(p, 0); 224835c653c4SRichard Henderson break; 224935c653c4SRichard Henderson 225035c653c4SRichard Henderson case MO_ATOM_WITHIN16_PAIR: 225135c653c4SRichard Henderson /* Since size > 8, this is the half that must be atomic. */ 225235c653c4SRichard Henderson return do_ld_whole_be16(env, ra, p, a); 225335c653c4SRichard Henderson 225435c653c4SRichard Henderson case MO_ATOM_IFALIGN_PAIR: 225535c653c4SRichard Henderson /* 225635c653c4SRichard Henderson * Since size > 8, both halves are misaligned, 225735c653c4SRichard Henderson * and so neither is atomic. 
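 * (A page boundary is itself 8-aligned, so an 8-aligned 16-byte
 * access would split into portions of exactly 8 or 16 bytes, never
 * with 8 < size < 16.)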
225835c653c4SRichard Henderson */ 225935c653c4SRichard Henderson case MO_ATOM_IFALIGN: 226035c653c4SRichard Henderson case MO_ATOM_WITHIN16: 226135c653c4SRichard Henderson case MO_ATOM_NONE: 226235c653c4SRichard Henderson p->size = size - 8; 226335c653c4SRichard Henderson a = do_ld_bytes_beN(p, a); 226435c653c4SRichard Henderson b = ldq_be_p(p->haddr + size - 8); 226535c653c4SRichard Henderson break; 226635c653c4SRichard Henderson 226735c653c4SRichard Henderson default: 226835c653c4SRichard Henderson g_assert_not_reached(); 226935c653c4SRichard Henderson } 227035c653c4SRichard Henderson 227135c653c4SRichard Henderson return int128_make128(b, a); 227235c653c4SRichard Henderson } 227335c653c4SRichard Henderson 22748cfdacaaSRichard Henderson static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx, 22758cfdacaaSRichard Henderson MMUAccessType type, uintptr_t ra) 22768cfdacaaSRichard Henderson { 22778cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 22788cfdacaaSRichard Henderson return io_readx(env, p->full, mmu_idx, p->addr, ra, type, MO_UB); 22798cfdacaaSRichard Henderson } else { 22808cfdacaaSRichard Henderson return *(uint8_t *)p->haddr; 22818cfdacaaSRichard Henderson } 22828cfdacaaSRichard Henderson } 22838cfdacaaSRichard Henderson 22848cfdacaaSRichard Henderson static uint16_t do_ld_2(CPUArchState *env, MMULookupPageData *p, int mmu_idx, 22858cfdacaaSRichard Henderson MMUAccessType type, MemOp memop, uintptr_t ra) 22868cfdacaaSRichard Henderson { 22878cfdacaaSRichard Henderson uint64_t ret; 22888cfdacaaSRichard Henderson 22898cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 22908cfdacaaSRichard Henderson return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop); 22918cfdacaaSRichard Henderson } 22928cfdacaaSRichard Henderson 22938cfdacaaSRichard Henderson /* Perform the load host endian, then swap if necessary. */ 2294cdfac37bSRichard Henderson ret = load_atom_2(env, ra, p->haddr, memop); 22958cfdacaaSRichard Henderson if (memop & MO_BSWAP) { 22968cfdacaaSRichard Henderson ret = bswap16(ret); 22978cfdacaaSRichard Henderson } 22988cfdacaaSRichard Henderson return ret; 22998cfdacaaSRichard Henderson } 23008cfdacaaSRichard Henderson 23018cfdacaaSRichard Henderson static uint32_t do_ld_4(CPUArchState *env, MMULookupPageData *p, int mmu_idx, 23028cfdacaaSRichard Henderson MMUAccessType type, MemOp memop, uintptr_t ra) 23038cfdacaaSRichard Henderson { 23048cfdacaaSRichard Henderson uint32_t ret; 23058cfdacaaSRichard Henderson 23068cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 23078cfdacaaSRichard Henderson return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop); 23088cfdacaaSRichard Henderson } 23098cfdacaaSRichard Henderson 23108cfdacaaSRichard Henderson /* Perform the load host endian. 
*/ 2311cdfac37bSRichard Henderson ret = load_atom_4(env, ra, p->haddr, memop); 23128cfdacaaSRichard Henderson if (memop & MO_BSWAP) { 23138cfdacaaSRichard Henderson ret = bswap32(ret); 23148cfdacaaSRichard Henderson } 23158cfdacaaSRichard Henderson return ret; 23168cfdacaaSRichard Henderson } 23178cfdacaaSRichard Henderson 23188cfdacaaSRichard Henderson static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx, 23198cfdacaaSRichard Henderson MMUAccessType type, MemOp memop, uintptr_t ra) 23208cfdacaaSRichard Henderson { 23218cfdacaaSRichard Henderson uint64_t ret; 23228cfdacaaSRichard Henderson 23238cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 23248cfdacaaSRichard Henderson return io_readx(env, p->full, mmu_idx, p->addr, ra, type, memop); 23258cfdacaaSRichard Henderson } 23268cfdacaaSRichard Henderson 23278cfdacaaSRichard Henderson /* Perform the load host endian. */ 2328cdfac37bSRichard Henderson ret = load_atom_8(env, ra, p->haddr, memop); 23298cfdacaaSRichard Henderson if (memop & MO_BSWAP) { 23308cfdacaaSRichard Henderson ret = bswap64(ret); 23318cfdacaaSRichard Henderson } 23328cfdacaaSRichard Henderson return ret; 23338cfdacaaSRichard Henderson } 23348cfdacaaSRichard Henderson 23358cfdacaaSRichard Henderson static uint8_t do_ld1_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, 23368cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 23378cfdacaaSRichard Henderson { 23388cfdacaaSRichard Henderson MMULookupLocals l; 23398cfdacaaSRichard Henderson bool crosspage; 23408cfdacaaSRichard Henderson 23418cfdacaaSRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); 23428cfdacaaSRichard Henderson tcg_debug_assert(!crosspage); 23438cfdacaaSRichard Henderson 23448cfdacaaSRichard Henderson return do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra); 23452dd92606SRichard Henderson } 23462dd92606SRichard Henderson 234724e46e6cSRichard Henderson tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr, 23489002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2349eed56642SAlex Bennée { 23500cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8); 23518cfdacaaSRichard Henderson return do_ld1_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); 23522dd92606SRichard Henderson } 23532dd92606SRichard Henderson 23548cfdacaaSRichard Henderson static uint16_t do_ld2_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, 23558cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 23562dd92606SRichard Henderson { 23578cfdacaaSRichard Henderson MMULookupLocals l; 23588cfdacaaSRichard Henderson bool crosspage; 23598cfdacaaSRichard Henderson uint16_t ret; 23608cfdacaaSRichard Henderson uint8_t a, b; 23618cfdacaaSRichard Henderson 23628cfdacaaSRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); 23638cfdacaaSRichard Henderson if (likely(!crosspage)) { 23648cfdacaaSRichard Henderson return do_ld_2(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra); 23658cfdacaaSRichard Henderson } 23668cfdacaaSRichard Henderson 23678cfdacaaSRichard Henderson a = do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra); 23688cfdacaaSRichard Henderson b = do_ld_1(env, &l.page[1], l.mmu_idx, access_type, ra); 23698cfdacaaSRichard Henderson 23708cfdacaaSRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 23718cfdacaaSRichard Henderson ret = a | (b << 8); 23728cfdacaaSRichard Henderson } else { 23738cfdacaaSRichard Henderson ret = b | (a << 8); 23748cfdacaaSRichard Henderson } 
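    /*
     * For example, a little-endian 16-bit load split across two pages
     * with a == 0x34 and b == 0x12 reassembles to ret == 0x1234.
     */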
23758cfdacaaSRichard Henderson return ret; 2376eed56642SAlex Bennée } 2377eed56642SAlex Bennée 237824e46e6cSRichard Henderson tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr, 23799002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2380eed56642SAlex Bennée { 23810cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); 23828cfdacaaSRichard Henderson return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); 23832dd92606SRichard Henderson } 23842dd92606SRichard Henderson 23858cfdacaaSRichard Henderson static uint32_t do_ld4_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, 23868cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 23872dd92606SRichard Henderson { 23888cfdacaaSRichard Henderson MMULookupLocals l; 23898cfdacaaSRichard Henderson bool crosspage; 23908cfdacaaSRichard Henderson uint32_t ret; 23918cfdacaaSRichard Henderson 23928cfdacaaSRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); 23938cfdacaaSRichard Henderson if (likely(!crosspage)) { 23948cfdacaaSRichard Henderson return do_ld_4(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra); 23958cfdacaaSRichard Henderson } 23968cfdacaaSRichard Henderson 2397cdfac37bSRichard Henderson ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra); 2398cdfac37bSRichard Henderson ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra); 23998cfdacaaSRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 24008cfdacaaSRichard Henderson ret = bswap32(ret); 24018cfdacaaSRichard Henderson } 24028cfdacaaSRichard Henderson return ret; 2403eed56642SAlex Bennée } 2404eed56642SAlex Bennée 240524e46e6cSRichard Henderson tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr, 24069002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2407eed56642SAlex Bennée { 24080cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); 24098cfdacaaSRichard Henderson return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); 24108cfdacaaSRichard Henderson } 24118cfdacaaSRichard Henderson 24128cfdacaaSRichard Henderson static uint64_t do_ld8_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi, 24138cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 24148cfdacaaSRichard Henderson { 24158cfdacaaSRichard Henderson MMULookupLocals l; 24168cfdacaaSRichard Henderson bool crosspage; 24178cfdacaaSRichard Henderson uint64_t ret; 24188cfdacaaSRichard Henderson 24198cfdacaaSRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l); 24208cfdacaaSRichard Henderson if (likely(!crosspage)) { 24218cfdacaaSRichard Henderson return do_ld_8(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra); 24228cfdacaaSRichard Henderson } 24238cfdacaaSRichard Henderson 2424cdfac37bSRichard Henderson ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra); 2425cdfac37bSRichard Henderson ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra); 24268cfdacaaSRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 24278cfdacaaSRichard Henderson ret = bswap64(ret); 24288cfdacaaSRichard Henderson } 24298cfdacaaSRichard Henderson return ret; 2430eed56642SAlex Bennée } 2431eed56642SAlex Bennée 243224e46e6cSRichard Henderson uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr, 24339002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2434eed56642SAlex Bennée { 24350cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); 24368cfdacaaSRichard 
Henderson return do_ld8_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD); 2437eed56642SAlex Bennée } 2438eed56642SAlex Bennée 2439eed56642SAlex Bennée /* 2440eed56642SAlex Bennée * Provide signed versions of the load routines as well. We can of course 2441eed56642SAlex Bennée * avoid this for 64-bit data, or for 32-bit data on 32-bit host. 2442eed56642SAlex Bennée */ 2443eed56642SAlex Bennée 244424e46e6cSRichard Henderson tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr, 24459002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2446eed56642SAlex Bennée { 24470cadc1edSRichard Henderson return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr); 2448eed56642SAlex Bennée } 2449eed56642SAlex Bennée 245024e46e6cSRichard Henderson tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr, 24519002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2452eed56642SAlex Bennée { 24530cadc1edSRichard Henderson return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr); 2454eed56642SAlex Bennée } 2455eed56642SAlex Bennée 245624e46e6cSRichard Henderson tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr, 24579002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2458eed56642SAlex Bennée { 24590cadc1edSRichard Henderson return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr); 2460eed56642SAlex Bennée } 2461eed56642SAlex Bennée 246235c653c4SRichard Henderson static Int128 do_ld16_mmu(CPUArchState *env, target_ulong addr, 246335c653c4SRichard Henderson MemOpIdx oi, uintptr_t ra) 246435c653c4SRichard Henderson { 246535c653c4SRichard Henderson MMULookupLocals l; 246635c653c4SRichard Henderson bool crosspage; 246735c653c4SRichard Henderson uint64_t a, b; 246835c653c4SRichard Henderson Int128 ret; 246935c653c4SRichard Henderson int first; 247035c653c4SRichard Henderson 247135c653c4SRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l); 247235c653c4SRichard Henderson if (likely(!crosspage)) { 247335c653c4SRichard Henderson /* Perform the load host endian. */ 247435c653c4SRichard Henderson if (unlikely(l.page[0].flags & TLB_MMIO)) { 247535c653c4SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 247635c653c4SRichard Henderson a = io_readx(env, l.page[0].full, l.mmu_idx, addr, 247735c653c4SRichard Henderson ra, MMU_DATA_LOAD, MO_64); 247835c653c4SRichard Henderson b = io_readx(env, l.page[0].full, l.mmu_idx, addr + 8, 247935c653c4SRichard Henderson ra, MMU_DATA_LOAD, MO_64); 248035c653c4SRichard Henderson ret = int128_make128(HOST_BIG_ENDIAN ? b : a, 248135c653c4SRichard Henderson HOST_BIG_ENDIAN ? 
a : b); 248235c653c4SRichard Henderson } else { 248335c653c4SRichard Henderson ret = load_atom_16(env, ra, l.page[0].haddr, l.memop); 248435c653c4SRichard Henderson } 248535c653c4SRichard Henderson if (l.memop & MO_BSWAP) { 248635c653c4SRichard Henderson ret = bswap128(ret); 248735c653c4SRichard Henderson } 248835c653c4SRichard Henderson return ret; 248935c653c4SRichard Henderson } 249035c653c4SRichard Henderson 249135c653c4SRichard Henderson first = l.page[0].size; 249235c653c4SRichard Henderson if (first == 8) { 249335c653c4SRichard Henderson MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64; 249435c653c4SRichard Henderson 249535c653c4SRichard Henderson a = do_ld_8(env, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra); 249635c653c4SRichard Henderson b = do_ld_8(env, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra); 249735c653c4SRichard Henderson if ((mop8 & MO_BSWAP) == MO_LE) { 249835c653c4SRichard Henderson ret = int128_make128(a, b); 249935c653c4SRichard Henderson } else { 250035c653c4SRichard Henderson ret = int128_make128(b, a); 250135c653c4SRichard Henderson } 250235c653c4SRichard Henderson return ret; 250335c653c4SRichard Henderson } 250435c653c4SRichard Henderson 250535c653c4SRichard Henderson if (first < 8) { 250635c653c4SRichard Henderson a = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, 250735c653c4SRichard Henderson MMU_DATA_LOAD, l.memop, ra); 250835c653c4SRichard Henderson ret = do_ld16_beN(env, &l.page[1], a, l.mmu_idx, l.memop, ra); 250935c653c4SRichard Henderson } else { 251035c653c4SRichard Henderson ret = do_ld16_beN(env, &l.page[0], 0, l.mmu_idx, l.memop, ra); 251135c653c4SRichard Henderson b = int128_getlo(ret); 251235c653c4SRichard Henderson ret = int128_lshift(ret, l.page[1].size * 8); 251335c653c4SRichard Henderson a = int128_gethi(ret); 251435c653c4SRichard Henderson b = do_ld_beN(env, &l.page[1], b, l.mmu_idx, 251535c653c4SRichard Henderson MMU_DATA_LOAD, l.memop, ra); 251635c653c4SRichard Henderson ret = int128_make128(b, a); 251735c653c4SRichard Henderson } 251835c653c4SRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 251935c653c4SRichard Henderson ret = bswap128(ret); 252035c653c4SRichard Henderson } 252135c653c4SRichard Henderson return ret; 252235c653c4SRichard Henderson } 252335c653c4SRichard Henderson 252424e46e6cSRichard Henderson Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr, 252535c653c4SRichard Henderson uint32_t oi, uintptr_t retaddr) 252635c653c4SRichard Henderson { 252735c653c4SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); 252835c653c4SRichard Henderson return do_ld16_mmu(env, addr, oi, retaddr); 252935c653c4SRichard Henderson } 253035c653c4SRichard Henderson 2531e570597aSRichard Henderson Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi) 253235c653c4SRichard Henderson { 253335c653c4SRichard Henderson return helper_ld16_mmu(env, addr, oi, GETPC()); 253435c653c4SRichard Henderson } 253535c653c4SRichard Henderson 2536eed56642SAlex Bennée /* 2537d03f1408SRichard Henderson * Load helpers for cpu_ldst.h. 
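 *
 * These wrap the do_ld*_mmu routines above, differing from the
 * helper_* entry points chiefly in also issuing the plugin memory
 * callback once the load has completed.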
2538d03f1408SRichard Henderson */ 2539d03f1408SRichard Henderson 25408cfdacaaSRichard Henderson static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi) 2541d03f1408SRichard Henderson { 254237aff087SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); 2543d03f1408SRichard Henderson } 2544d03f1408SRichard Henderson 2545f83bcecbSRichard Henderson uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) 2546d03f1408SRichard Henderson { 25478cfdacaaSRichard Henderson uint8_t ret; 25488cfdacaaSRichard Henderson 25490cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB); 25508cfdacaaSRichard Henderson ret = do_ld1_mmu(env, addr, oi, ra, MMU_DATA_LOAD); 25518cfdacaaSRichard Henderson plugin_load_cb(env, addr, oi); 25528cfdacaaSRichard Henderson return ret; 2553d03f1408SRichard Henderson } 2554d03f1408SRichard Henderson 2555fbea7a40SRichard Henderson uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr, 2556f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t ra) 2557d03f1408SRichard Henderson { 25588cfdacaaSRichard Henderson uint16_t ret; 25598cfdacaaSRichard Henderson 2560fbea7a40SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); 25618cfdacaaSRichard Henderson ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD); 25628cfdacaaSRichard Henderson plugin_load_cb(env, addr, oi); 25638cfdacaaSRichard Henderson return ret; 2564d03f1408SRichard Henderson } 2565d03f1408SRichard Henderson 2566fbea7a40SRichard Henderson uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr, 2567f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t ra) 2568d03f1408SRichard Henderson { 25698cfdacaaSRichard Henderson uint32_t ret; 25708cfdacaaSRichard Henderson 2571fbea7a40SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); 25728cfdacaaSRichard Henderson ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD); 25738cfdacaaSRichard Henderson plugin_load_cb(env, addr, oi); 25748cfdacaaSRichard Henderson return ret; 2575d03f1408SRichard Henderson } 2576d03f1408SRichard Henderson 2577fbea7a40SRichard Henderson uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr, 2578f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t ra) 2579d03f1408SRichard Henderson { 25808cfdacaaSRichard Henderson uint64_t ret; 25818cfdacaaSRichard Henderson 2582fbea7a40SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); 25838cfdacaaSRichard Henderson ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD); 25848cfdacaaSRichard Henderson plugin_load_cb(env, addr, oi); 25858cfdacaaSRichard Henderson return ret; 2586d03f1408SRichard Henderson } 2587d03f1408SRichard Henderson 2588fbea7a40SRichard Henderson Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr, 2589cb48f365SRichard Henderson MemOpIdx oi, uintptr_t ra) 2590cb48f365SRichard Henderson { 259135c653c4SRichard Henderson Int128 ret; 2592cb48f365SRichard Henderson 2593fbea7a40SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); 259435c653c4SRichard Henderson ret = do_ld16_mmu(env, addr, oi, ra); 259535c653c4SRichard Henderson plugin_load_cb(env, addr, oi); 259635c653c4SRichard Henderson return ret; 2597cb48f365SRichard Henderson } 2598cb48f365SRichard Henderson 2599d03f1408SRichard Henderson /* 2600eed56642SAlex Bennée * Store Helpers 2601eed56642SAlex Bennée */ 2602eed56642SAlex Bennée 260359213461SRichard Henderson /** 260459213461SRichard Henderson * do_st_mmio_leN: 260559213461SRichard Henderson * @env: cpu context 260659213461SRichard Henderson 
* @p: translation parameters 260759213461SRichard Henderson * @val_le: data to store 260859213461SRichard Henderson * @mmu_idx: virtual address context 260959213461SRichard Henderson * @ra: return address into tcg generated code, or 0 261059213461SRichard Henderson * 261159213461SRichard Henderson * Store @p->size bytes at @p->addr, which is memory-mapped i/o. 261259213461SRichard Henderson * The bytes to store are extracted in little-endian order from @val_le; 261359213461SRichard Henderson * return the bytes of @val_le beyond @p->size that have not been stored. 261459213461SRichard Henderson */ 261559213461SRichard Henderson static uint64_t do_st_mmio_leN(CPUArchState *env, MMULookupPageData *p, 261659213461SRichard Henderson uint64_t val_le, int mmu_idx, uintptr_t ra) 26176b8b622eSRichard Henderson { 261859213461SRichard Henderson CPUTLBEntryFull *full = p->full; 261959213461SRichard Henderson target_ulong addr = p->addr; 262059213461SRichard Henderson int i, size = p->size; 26216b8b622eSRichard Henderson 262259213461SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 262359213461SRichard Henderson for (i = 0; i < size; i++, val_le >>= 8) { 262459213461SRichard Henderson io_writex(env, full, mmu_idx, val_le, addr + i, ra, MO_UB); 262559213461SRichard Henderson } 262659213461SRichard Henderson return val_le; 262759213461SRichard Henderson } 262859213461SRichard Henderson 26296b8b622eSRichard Henderson /* 263059213461SRichard Henderson * Wrapper for the above. 26316b8b622eSRichard Henderson */ 263259213461SRichard Henderson static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p, 26335b36f268SRichard Henderson uint64_t val_le, int mmu_idx, 26345b36f268SRichard Henderson MemOp mop, uintptr_t ra) 263559213461SRichard Henderson { 26365b36f268SRichard Henderson MemOp atom; 26375b36f268SRichard Henderson unsigned tmp, half_size; 26385b36f268SRichard Henderson 263959213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 264059213461SRichard Henderson return do_st_mmio_leN(env, p, val_le, mmu_idx, ra); 264159213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 264259213461SRichard Henderson return val_le >> (p->size * 8); 26435b36f268SRichard Henderson } 26445b36f268SRichard Henderson 26455b36f268SRichard Henderson /* 26465b36f268SRichard Henderson * It is a given that we cross a page and therefore there is no atomicity 26475b36f268SRichard Henderson * for the store as a whole, but subobjects may need attention. 26485b36f268SRichard Henderson */ 26495b36f268SRichard Henderson atom = mop & MO_ATOM_MASK; 26505b36f268SRichard Henderson switch (atom) { 26515b36f268SRichard Henderson case MO_ATOM_SUBALIGN: 26525b36f268SRichard Henderson return store_parts_leN(p->haddr, p->size, val_le); 26535b36f268SRichard Henderson 26545b36f268SRichard Henderson case MO_ATOM_IFALIGN_PAIR: 26555b36f268SRichard Henderson case MO_ATOM_WITHIN16_PAIR: 26565b36f268SRichard Henderson tmp = mop & MO_SIZE; 26575b36f268SRichard Henderson tmp = tmp ? tmp - 1 : 0; 26585b36f268SRichard Henderson half_size = 1 << tmp; 26595b36f268SRichard Henderson if (atom == MO_ATOM_IFALIGN_PAIR 26605b36f268SRichard Henderson ? 
p->size == half_size 26615b36f268SRichard Henderson : p->size >= half_size) { 26625b36f268SRichard Henderson if (!HAVE_al8_fast && p->size <= 4) { 26635b36f268SRichard Henderson return store_whole_le4(p->haddr, p->size, val_le); 26645b36f268SRichard Henderson } else if (HAVE_al8) { 26655b36f268SRichard Henderson return store_whole_le8(p->haddr, p->size, val_le); 26666b8b622eSRichard Henderson } else { 26675b36f268SRichard Henderson cpu_loop_exit_atomic(env_cpu(env), ra); 26685b36f268SRichard Henderson } 26695b36f268SRichard Henderson } 26705b36f268SRichard Henderson /* fall through */ 26715b36f268SRichard Henderson 26725b36f268SRichard Henderson case MO_ATOM_IFALIGN: 26735b36f268SRichard Henderson case MO_ATOM_WITHIN16: 26745b36f268SRichard Henderson case MO_ATOM_NONE: 26755b36f268SRichard Henderson return store_bytes_leN(p->haddr, p->size, val_le); 26765b36f268SRichard Henderson 26775b36f268SRichard Henderson default: 26785b36f268SRichard Henderson g_assert_not_reached(); 26796b8b622eSRichard Henderson } 26806b8b622eSRichard Henderson } 26816b8b622eSRichard Henderson 268235c653c4SRichard Henderson /* 268335c653c4SRichard Henderson * Wrapper for the above, for 8 < size < 16. 268435c653c4SRichard Henderson */ 268535c653c4SRichard Henderson static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p, 268635c653c4SRichard Henderson Int128 val_le, int mmu_idx, 268735c653c4SRichard Henderson MemOp mop, uintptr_t ra) 268835c653c4SRichard Henderson { 268935c653c4SRichard Henderson int size = p->size; 269035c653c4SRichard Henderson MemOp atom; 269135c653c4SRichard Henderson 269235c653c4SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 269335c653c4SRichard Henderson p->size = 8; 269435c653c4SRichard Henderson do_st_mmio_leN(env, p, int128_getlo(val_le), mmu_idx, ra); 269535c653c4SRichard Henderson p->size = size - 8; 269635c653c4SRichard Henderson p->addr += 8; 269735c653c4SRichard Henderson return do_st_mmio_leN(env, p, int128_gethi(val_le), mmu_idx, ra); 269835c653c4SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 269935c653c4SRichard Henderson return int128_gethi(val_le) >> ((size - 8) * 8); 270035c653c4SRichard Henderson } 270135c653c4SRichard Henderson 270235c653c4SRichard Henderson /* 270335c653c4SRichard Henderson * It is a given that we cross a page and therefore there is no atomicity 270435c653c4SRichard Henderson * for the store as a whole, but subobjects may need attention. 270535c653c4SRichard Henderson */ 270635c653c4SRichard Henderson atom = mop & MO_ATOM_MASK; 270735c653c4SRichard Henderson switch (atom) { 270835c653c4SRichard Henderson case MO_ATOM_SUBALIGN: 270935c653c4SRichard Henderson store_parts_leN(p->haddr, 8, int128_getlo(val_le)); 271035c653c4SRichard Henderson return store_parts_leN(p->haddr + 8, p->size - 8, 271135c653c4SRichard Henderson int128_gethi(val_le)); 271235c653c4SRichard Henderson 271335c653c4SRichard Henderson case MO_ATOM_WITHIN16_PAIR: 271435c653c4SRichard Henderson /* Since size > 8, this is the half that must be atomic. */ 271535c653c4SRichard Henderson if (!HAVE_al16) { 271635c653c4SRichard Henderson cpu_loop_exit_atomic(env_cpu(env), ra); 271735c653c4SRichard Henderson } 271835c653c4SRichard Henderson return store_whole_le16(p->haddr, p->size, val_le); 271935c653c4SRichard Henderson 272035c653c4SRichard Henderson case MO_ATOM_IFALIGN_PAIR: 272135c653c4SRichard Henderson /* 272235c653c4SRichard Henderson * Since size > 8, both halves are misaligned, 272335c653c4SRichard Henderson * and so neither is atomic. 
272435c653c4SRichard Henderson */ 272535c653c4SRichard Henderson case MO_ATOM_IFALIGN: 272635c653c4SRichard Henderson case MO_ATOM_NONE: 272735c653c4SRichard Henderson stq_le_p(p->haddr, int128_getlo(val_le)); 272835c653c4SRichard Henderson return store_bytes_leN(p->haddr + 8, p->size - 8, 272935c653c4SRichard Henderson int128_gethi(val_le)); 273035c653c4SRichard Henderson 273135c653c4SRichard Henderson default: 273235c653c4SRichard Henderson g_assert_not_reached(); 273335c653c4SRichard Henderson } 273435c653c4SRichard Henderson } 273535c653c4SRichard Henderson 273659213461SRichard Henderson static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val, 273759213461SRichard Henderson int mmu_idx, uintptr_t ra) 2738eed56642SAlex Bennée { 273959213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 274059213461SRichard Henderson io_writex(env, p->full, mmu_idx, val, p->addr, ra, MO_UB); 274159213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 274259213461SRichard Henderson /* nothing */ 27435b87b3e6SRichard Henderson } else { 274459213461SRichard Henderson *(uint8_t *)p->haddr = val; 27455b87b3e6SRichard Henderson } 2746eed56642SAlex Bennée } 2747eed56642SAlex Bennée 274859213461SRichard Henderson static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val, 274959213461SRichard Henderson int mmu_idx, MemOp memop, uintptr_t ra) 2750eed56642SAlex Bennée { 275159213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 275259213461SRichard Henderson io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop); 275359213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 275459213461SRichard Henderson /* nothing */ 275559213461SRichard Henderson } else { 275659213461SRichard Henderson /* Swap to host endian if necessary, then store. */ 275759213461SRichard Henderson if (memop & MO_BSWAP) { 275859213461SRichard Henderson val = bswap16(val); 275959213461SRichard Henderson } 27605b36f268SRichard Henderson store_atom_2(env, ra, p->haddr, memop, val); 276159213461SRichard Henderson } 276259213461SRichard Henderson } 276359213461SRichard Henderson 276459213461SRichard Henderson static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val, 276559213461SRichard Henderson int mmu_idx, MemOp memop, uintptr_t ra) 276659213461SRichard Henderson { 276759213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 276859213461SRichard Henderson io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop); 276959213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 277059213461SRichard Henderson /* nothing */ 277159213461SRichard Henderson } else { 277259213461SRichard Henderson /* Swap to host endian if necessary, then store. 
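 * For example, a byte-swapped 32-bit store of 0x11223344 hands
 * 0x44332211 on to store_atom_4.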
*/ 277359213461SRichard Henderson if (memop & MO_BSWAP) { 277459213461SRichard Henderson val = bswap32(val); 277559213461SRichard Henderson } 27765b36f268SRichard Henderson store_atom_4(env, ra, p->haddr, memop, val); 277759213461SRichard Henderson } 277859213461SRichard Henderson } 277959213461SRichard Henderson 278059213461SRichard Henderson static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val, 278159213461SRichard Henderson int mmu_idx, MemOp memop, uintptr_t ra) 278259213461SRichard Henderson { 278359213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 278459213461SRichard Henderson io_writex(env, p->full, mmu_idx, val, p->addr, ra, memop); 278559213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 278659213461SRichard Henderson /* nothing */ 278759213461SRichard Henderson } else { 278859213461SRichard Henderson /* Swap to host endian if necessary, then store. */ 278959213461SRichard Henderson if (memop & MO_BSWAP) { 279059213461SRichard Henderson val = bswap64(val); 279159213461SRichard Henderson } 27925b36f268SRichard Henderson store_atom_8(env, ra, p->haddr, memop, val); 279359213461SRichard Henderson } 2794eed56642SAlex Bennée } 2795eed56642SAlex Bennée 279624e46e6cSRichard Henderson void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val, 279759213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 2798f83bcecbSRichard Henderson { 279959213461SRichard Henderson MMULookupLocals l; 280059213461SRichard Henderson bool crosspage; 280159213461SRichard Henderson 28020cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8); 280359213461SRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); 280459213461SRichard Henderson tcg_debug_assert(!crosspage); 280559213461SRichard Henderson 280659213461SRichard Henderson do_st_1(env, &l.page[0], val, l.mmu_idx, ra); 2807f83bcecbSRichard Henderson } 2808f83bcecbSRichard Henderson 280959213461SRichard Henderson static void do_st2_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 281059213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 2811f83bcecbSRichard Henderson { 281259213461SRichard Henderson MMULookupLocals l; 281359213461SRichard Henderson bool crosspage; 281459213461SRichard Henderson uint8_t a, b; 281559213461SRichard Henderson 281659213461SRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); 281759213461SRichard Henderson if (likely(!crosspage)) { 281859213461SRichard Henderson do_st_2(env, &l.page[0], val, l.mmu_idx, l.memop, ra); 281959213461SRichard Henderson return; 282059213461SRichard Henderson } 282159213461SRichard Henderson 282259213461SRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 282359213461SRichard Henderson a = val, b = val >> 8; 282459213461SRichard Henderson } else { 282559213461SRichard Henderson b = val, a = val >> 8; 282659213461SRichard Henderson } 282759213461SRichard Henderson do_st_1(env, &l.page[0], a, l.mmu_idx, ra); 282859213461SRichard Henderson do_st_1(env, &l.page[1], b, l.mmu_idx, ra); 2829f83bcecbSRichard Henderson } 2830f83bcecbSRichard Henderson 283124e46e6cSRichard Henderson void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val, 28329002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2833eed56642SAlex Bennée { 28340cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); 283559213461SRichard Henderson do_st2_mmu(env, addr, val, oi, retaddr); 2836f83bcecbSRichard Henderson } 2837f83bcecbSRichard Henderson 
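/*
 * A worked example for do_st2_mmu above: a big-endian 16-bit store of
 * 0x1234 that crosses a page boundary writes a == 0x12 to the last byte
 * of the first page and b == 0x34 to the first byte of the second.
 */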
283859213461SRichard Henderson static void do_st4_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 283959213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 2840f83bcecbSRichard Henderson { 284159213461SRichard Henderson MMULookupLocals l; 284259213461SRichard Henderson bool crosspage; 284359213461SRichard Henderson 284459213461SRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); 284559213461SRichard Henderson if (likely(!crosspage)) { 284659213461SRichard Henderson do_st_4(env, &l.page[0], val, l.mmu_idx, l.memop, ra); 284759213461SRichard Henderson return; 284859213461SRichard Henderson } 284959213461SRichard Henderson 285059213461SRichard Henderson /* Swap to little endian for simplicity, then store by bytes. */ 285159213461SRichard Henderson if ((l.memop & MO_BSWAP) != MO_LE) { 285259213461SRichard Henderson val = bswap32(val); 285359213461SRichard Henderson } 28545b36f268SRichard Henderson val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra); 28555b36f268SRichard Henderson (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra); 2856eed56642SAlex Bennée } 2857eed56642SAlex Bennée 285824e46e6cSRichard Henderson void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val, 28599002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2860eed56642SAlex Bennée { 28610cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); 286259213461SRichard Henderson do_st4_mmu(env, addr, val, oi, retaddr); 286359213461SRichard Henderson } 286459213461SRichard Henderson 286559213461SRichard Henderson static void do_st8_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 286659213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 286759213461SRichard Henderson { 286859213461SRichard Henderson MMULookupLocals l; 286959213461SRichard Henderson bool crosspage; 287059213461SRichard Henderson 287159213461SRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); 287259213461SRichard Henderson if (likely(!crosspage)) { 287359213461SRichard Henderson do_st_8(env, &l.page[0], val, l.mmu_idx, l.memop, ra); 287459213461SRichard Henderson return; 287559213461SRichard Henderson } 287659213461SRichard Henderson 287759213461SRichard Henderson /* Swap to little endian for simplicity, then store by bytes. 
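 * For example, an 8-byte store of 0x1122334455667788 split 3/5 across
 * pages emits 0x88, 0x77, 0x66 on the first page; do_st_leN then
 * returns 0x1122334455, whose five bytes land on the second page.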
*/ 287859213461SRichard Henderson if ((l.memop & MO_BSWAP) != MO_LE) { 287959213461SRichard Henderson val = bswap64(val); 288059213461SRichard Henderson } 28815b36f268SRichard Henderson val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra); 28825b36f268SRichard Henderson (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra); 2883eed56642SAlex Bennée } 2884eed56642SAlex Bennée 288524e46e6cSRichard Henderson void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val, 28869002ffcbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2887eed56642SAlex Bennée { 28880cadc1edSRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); 288959213461SRichard Henderson do_st8_mmu(env, addr, val, oi, retaddr); 2890eed56642SAlex Bennée } 2891d9bb58e5SYang Zhong 289235c653c4SRichard Henderson static void do_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val, 289335c653c4SRichard Henderson MemOpIdx oi, uintptr_t ra) 289435c653c4SRichard Henderson { 289535c653c4SRichard Henderson MMULookupLocals l; 289635c653c4SRichard Henderson bool crosspage; 289735c653c4SRichard Henderson uint64_t a, b; 289835c653c4SRichard Henderson int first; 289935c653c4SRichard Henderson 290035c653c4SRichard Henderson crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l); 290135c653c4SRichard Henderson if (likely(!crosspage)) { 290235c653c4SRichard Henderson /* Swap to host endian if necessary, then store. */ 290335c653c4SRichard Henderson if (l.memop & MO_BSWAP) { 290435c653c4SRichard Henderson val = bswap128(val); 290535c653c4SRichard Henderson } 290635c653c4SRichard Henderson if (unlikely(l.page[0].flags & TLB_MMIO)) { 290735c653c4SRichard Henderson QEMU_IOTHREAD_LOCK_GUARD(); 290835c653c4SRichard Henderson if (HOST_BIG_ENDIAN) { 290935c653c4SRichard Henderson b = int128_getlo(val), a = int128_gethi(val); 291035c653c4SRichard Henderson } else { 291135c653c4SRichard Henderson a = int128_getlo(val), b = int128_gethi(val); 291235c653c4SRichard Henderson } 291335c653c4SRichard Henderson io_writex(env, l.page[0].full, l.mmu_idx, a, addr, ra, MO_64); 291435c653c4SRichard Henderson io_writex(env, l.page[0].full, l.mmu_idx, b, addr + 8, ra, MO_64); 291535c653c4SRichard Henderson } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) { 291635c653c4SRichard Henderson /* nothing */ 291735c653c4SRichard Henderson } else { 291835c653c4SRichard Henderson store_atom_16(env, ra, l.page[0].haddr, l.memop, val); 291935c653c4SRichard Henderson } 292035c653c4SRichard Henderson return; 292135c653c4SRichard Henderson } 292235c653c4SRichard Henderson 292335c653c4SRichard Henderson first = l.page[0].size; 292435c653c4SRichard Henderson if (first == 8) { 292535c653c4SRichard Henderson MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64; 292635c653c4SRichard Henderson 292735c653c4SRichard Henderson if (l.memop & MO_BSWAP) { 292835c653c4SRichard Henderson val = bswap128(val); 292935c653c4SRichard Henderson } 293035c653c4SRichard Henderson if (HOST_BIG_ENDIAN) { 293135c653c4SRichard Henderson b = int128_getlo(val), a = int128_gethi(val); 293235c653c4SRichard Henderson } else { 293335c653c4SRichard Henderson a = int128_getlo(val), b = int128_gethi(val); 293435c653c4SRichard Henderson } 293535c653c4SRichard Henderson do_st_8(env, &l.page[0], a, l.mmu_idx, mop8, ra); 293635c653c4SRichard Henderson do_st_8(env, &l.page[1], b, l.mmu_idx, mop8, ra); 293735c653c4SRichard Henderson return; 293835c653c4SRichard Henderson } 293935c653c4SRichard Henderson 294035c653c4SRichard Henderson if ((l.memop & MO_BSWAP) != 
MO_LE) { 294135c653c4SRichard Henderson val = bswap128(val); 294235c653c4SRichard Henderson } 294335c653c4SRichard Henderson if (first < 8) { 294435c653c4SRichard Henderson do_st_leN(env, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra); 294535c653c4SRichard Henderson val = int128_urshift(val, first * 8); 294635c653c4SRichard Henderson do_st16_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra); 294735c653c4SRichard Henderson } else { 294835c653c4SRichard Henderson b = do_st16_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra); 294935c653c4SRichard Henderson do_st_leN(env, &l.page[1], b, l.mmu_idx, l.memop, ra); 295035c653c4SRichard Henderson } 295135c653c4SRichard Henderson } 295235c653c4SRichard Henderson 295324e46e6cSRichard Henderson void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val, 295435c653c4SRichard Henderson MemOpIdx oi, uintptr_t retaddr) 295535c653c4SRichard Henderson { 295635c653c4SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); 295735c653c4SRichard Henderson do_st16_mmu(env, addr, val, oi, retaddr); 295835c653c4SRichard Henderson } 295935c653c4SRichard Henderson 2960e570597aSRichard Henderson void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi) 296135c653c4SRichard Henderson { 296235c653c4SRichard Henderson helper_st16_mmu(env, addr, val, oi, GETPC()); 296335c653c4SRichard Henderson } 296435c653c4SRichard Henderson 2965d03f1408SRichard Henderson /* 2966d03f1408SRichard Henderson * Store Helpers for cpu_ldst.h 2967d03f1408SRichard Henderson */ 2968d03f1408SRichard Henderson 296959213461SRichard Henderson static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi) 2970d03f1408SRichard Henderson { 297137aff087SRichard Henderson qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); 2972d03f1408SRichard Henderson } 2973d03f1408SRichard Henderson 2974f83bcecbSRichard Henderson void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, 2975f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2976d03f1408SRichard Henderson { 29770cadc1edSRichard Henderson helper_stb_mmu(env, addr, val, oi, retaddr); 297859213461SRichard Henderson plugin_store_cb(env, addr, oi); 2979d03f1408SRichard Henderson } 2980d03f1408SRichard Henderson 2981fbea7a40SRichard Henderson void cpu_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, 2982f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2983d03f1408SRichard Henderson { 2984fbea7a40SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16); 29850cadc1edSRichard Henderson do_st2_mmu(env, addr, val, oi, retaddr); 298659213461SRichard Henderson plugin_store_cb(env, addr, oi); 2987d03f1408SRichard Henderson } 2988d03f1408SRichard Henderson 2989fbea7a40SRichard Henderson void cpu_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, 2990f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2991d03f1408SRichard Henderson { 2992fbea7a40SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32); 29930cadc1edSRichard Henderson do_st4_mmu(env, addr, val, oi, retaddr); 299459213461SRichard Henderson plugin_store_cb(env, addr, oi); 2995d03f1408SRichard Henderson } 2996d03f1408SRichard Henderson 2997fbea7a40SRichard Henderson void cpu_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, 2998f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 2999d03f1408SRichard Henderson { 3000fbea7a40SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64); 30010cadc1edSRichard Henderson 
do_st8_mmu(env, addr, val, oi, retaddr); 300259213461SRichard Henderson plugin_store_cb(env, addr, oi); 3003b9e60257SRichard Henderson } 3004b9e60257SRichard Henderson 3005fbea7a40SRichard Henderson void cpu_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val, 3006f83bcecbSRichard Henderson MemOpIdx oi, uintptr_t retaddr) 3007b9e60257SRichard Henderson { 3008fbea7a40SRichard Henderson tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128); 300935c653c4SRichard Henderson do_st16_mmu(env, addr, val, oi, retaddr); 301035c653c4SRichard Henderson plugin_store_cb(env, addr, oi); 3011cb48f365SRichard Henderson } 3012cb48f365SRichard Henderson 3013f83bcecbSRichard Henderson #include "ldst_common.c.inc" 3014cfe04a4bSRichard Henderson 3015be9568b4SRichard Henderson /* 3016be9568b4SRichard Henderson * First set of functions passes in OI and RETADDR. 3017be9568b4SRichard Henderson * This makes them callable from other helpers. 3018be9568b4SRichard Henderson */ 3019d9bb58e5SYang Zhong 3020d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \ 3021be9568b4SRichard Henderson glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu) 3022a754f7f3SRichard Henderson 3023707526adSRichard Henderson #define ATOMIC_MMU_CLEANUP 3024d9bb58e5SYang Zhong 3025139c1837SPaolo Bonzini #include "atomic_common.c.inc" 3026d9bb58e5SYang Zhong 3027d9bb58e5SYang Zhong #define DATA_SIZE 1 3028d9bb58e5SYang Zhong #include "atomic_template.h" 3029d9bb58e5SYang Zhong 3030d9bb58e5SYang Zhong #define DATA_SIZE 2 3031d9bb58e5SYang Zhong #include "atomic_template.h" 3032d9bb58e5SYang Zhong 3033d9bb58e5SYang Zhong #define DATA_SIZE 4 3034d9bb58e5SYang Zhong #include "atomic_template.h" 3035d9bb58e5SYang Zhong 3036d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64 3037d9bb58e5SYang Zhong #define DATA_SIZE 8 3038d9bb58e5SYang Zhong #include "atomic_template.h" 3039d9bb58e5SYang Zhong #endif 3040d9bb58e5SYang Zhong 3041*4deb39ebSRichard Henderson #if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128) 3042d9bb58e5SYang Zhong #define DATA_SIZE 16 3043d9bb58e5SYang Zhong #include "atomic_template.h" 3044d9bb58e5SYang Zhong #endif 3045d9bb58e5SYang Zhong 3046d9bb58e5SYang Zhong /* Code access functions. 
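 * These fetch via MMU_INST_FETCH and the instruction-fetch mmu index;
 * e.g. a translator decoding a 32-bit opcode would use
 * cpu_ldl_code(env, pc).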
*/ 3047d9bb58e5SYang Zhong 3048fc4120a3SRichard Henderson uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr) 3049eed56642SAlex Bennée { 30509002ffcbSRichard Henderson MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); 30518cfdacaaSRichard Henderson return do_ld1_mmu(env, addr, oi, 0, MMU_INST_FETCH); 30524cef72d0SAlex Bennée } 30534cef72d0SAlex Bennée 3054fc4120a3SRichard Henderson uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr) 30552dd92606SRichard Henderson { 30569002ffcbSRichard Henderson MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true)); 30578cfdacaaSRichard Henderson return do_ld2_mmu(env, addr, oi, 0, MMU_INST_FETCH); 30582dd92606SRichard Henderson } 30592dd92606SRichard Henderson 3060fc4120a3SRichard Henderson uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr) 30614cef72d0SAlex Bennée { 30629002ffcbSRichard Henderson MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true)); 30638cfdacaaSRichard Henderson return do_ld4_mmu(env, addr, oi, 0, MMU_INST_FETCH); 3064eed56642SAlex Bennée } 3065d9bb58e5SYang Zhong 3066fc4120a3SRichard Henderson uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr) 3067eed56642SAlex Bennée { 3068fc313c64SFrédéric Pétrot MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true)); 30698cfdacaaSRichard Henderson return do_ld8_mmu(env, addr, oi, 0, MMU_INST_FETCH); 3070eed56642SAlex Bennée } 307128990626SRichard Henderson 307228990626SRichard Henderson uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr, 307328990626SRichard Henderson MemOpIdx oi, uintptr_t retaddr) 307428990626SRichard Henderson { 30758cfdacaaSRichard Henderson return do_ld1_mmu(env, addr, oi, retaddr, MMU_INST_FETCH); 307628990626SRichard Henderson } 307728990626SRichard Henderson 307828990626SRichard Henderson uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr, 307928990626SRichard Henderson MemOpIdx oi, uintptr_t retaddr) 308028990626SRichard Henderson { 30818cfdacaaSRichard Henderson return do_ld2_mmu(env, addr, oi, retaddr, MMU_INST_FETCH); 308228990626SRichard Henderson } 308328990626SRichard Henderson 308428990626SRichard Henderson uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr, 308528990626SRichard Henderson MemOpIdx oi, uintptr_t retaddr) 308628990626SRichard Henderson { 30878cfdacaaSRichard Henderson return do_ld4_mmu(env, addr, oi, retaddr, MMU_INST_FETCH); 308828990626SRichard Henderson } 308928990626SRichard Henderson 309028990626SRichard Henderson uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr, 309128990626SRichard Henderson MemOpIdx oi, uintptr_t retaddr) 309228990626SRichard Henderson { 30938cfdacaaSRichard Henderson return do_ld8_mmu(env, addr, oi, retaddr, MMU_INST_FETCH); 309428990626SRichard Henderson } 3095