/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/tb-flush.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto-common.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace.h"
#include "tb-hash.h"
#include "internal-common.h"
#include "internal-target.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"
#include "tcg/oversized-guest.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif
#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * vaddr even on 32 bit builds
 */
QEMU_BUILD_BUG_ON(sizeof(vaddr) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;
    int i, i0;

    if (unlikely(!jc)) {
        return;
    }

    i0 = tb_jmp_cache_hash_page(page_addr);
    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&jc->array[i0 + i].tb, NULL);
    }
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock_held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->fulltlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->fulltlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->fulltlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
    }
}

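/*
 * Worked example of the heuristic above (illustrative numbers, not taken
 * from any particular guest): with old_size == 1024 and
 * window_max_entries == 800, rate is 78%, so the table doubles to 2048
 * entries.  With window_max_entries == 150 and an expired window, rate is
 * 14%; pow2ceil(150) == 256 gives an expected rate of 58%, so the table
 * shrinks to 256 entries (never below 1 << CPU_TLB_DYN_MIN_BITS).
 */
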
static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
    CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUState *cpu, uintptr_t mmu_idx)
{
    cpu->neg.tlb.d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUState *cpu, uintptr_t mmu_idx)
{
    cpu->neg.tlb.d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&cpu->neg.tlb.c.lock);

    /* All tlbs are initialized flushed. */
    cpu->neg.tlb.c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    int i;

    qemu_spin_destroy(&cpu->neg.tlb.c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &cpu->neg.tlb.d[i];
        CPUTLBDescFast *fast = &cpu->neg.tlb.f[i];

        g_free(fast->table);
        g_free(desc->fulltlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&cpu->neg.tlb.c.lock);

    all_dirty = cpu->neg.tlb.c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    cpu->neg.tlb.c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(cpu, mmu_idx, now);
    }

    qemu_spin_unlock(&cpu->neg.tlb.c.lock);

    tcg_flush_jmp_cache(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&cpu->neg.tlb.c.full_flush_count,
                    cpu->neg.tlb.c.full_flush_count + 1);
    } else {
        qatomic_set(&cpu->neg.tlb.c.part_flush_count,
                    cpu->neg.tlb.c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&cpu->neg.tlb.c.elide_flush_count,
                        cpu->neg.tlb.c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

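/*
 * Illustrative usage (not part of this file; MMU_USER_IDX and
 * MMU_KERNEL_IDX are hypothetical target-specific indexes): a target
 * invalidating its user and kernel translation regimes on the current
 * vCPU would call
 *     tlb_flush_by_mmuidx(cs, (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX));
 * whereas a broadcast invalidation that must be visible to all vCPUs
 * before execution continues would use the *_all_cpus_synced variant below.
 */
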
void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      vaddr page, vaddr mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        vaddr page,
                                        vaddr mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUState *cpu, int mmu_idx,
                                            vaddr page,
                                            vaddr mask)
{
    CPUTLBDesc *d = &cpu->neg.tlb.d[mmu_idx];
    int k;

    assert_cpu_is_self(cpu);
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(cpu, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUState *cpu, int mmu_idx,
                                              vaddr page)
{
    tlb_flush_vtlb_page_mask_locked(cpu, mmu_idx, page, -1);
}

static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
{
    vaddr lp_addr = cpu->neg.tlb.d[midx].large_page_addr;
    vaddr lp_mask = cpu->neg.tlb.d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d (%016"
                  VADDR_PRIx "/%016" VADDR_PRIx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(cpu, midx, page), page)) {
            tlb_n_used_entries_dec(cpu, midx);
        }
        tlb_flush_vtlb_page_locked(cpu, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             vaddr addr,
                                             uint16_t idxmap)
{
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&cpu->neg.tlb.c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(cpu, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&cpu->neg.tlb.c.lock);

    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed page, which includes the previous.
     */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    vaddr addr_and_idxmap = data.target_ptr;
    vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

typedef struct {
    vaddr addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
{
    tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker.  */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}

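/*
 * Illustrative encoding (numbers assumed, not from this file): with 4KiB
 * pages, TARGET_PAGE_MASK clears the low 12 bits, so a page-aligned addr of
 * 0xffff8000 and an idxmap of 0x0003 travel together as 0xffff8003 in the
 * target_ptr payload; tlb_flush_page_by_mmuidx_async_1 splits them back out
 * with TARGET_PAGE_MASK.  An idxmap that does not fit in the page offset
 * takes the heap-allocated TLBFlushPageByMMUIdxData path instead.
 */
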
void tlb_flush_page(CPUState *cpu, vaddr addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              vaddr addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu.  */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

static void tlb_flush_range_locked(CPUState *cpu, int midx,
                                   vaddr addr, vaddr len,
                                   unsigned bits)
{
    CPUTLBDesc *d = &cpu->neg.tlb.d[midx];
    CPUTLBDescFast *f = &cpu->neg.tlb.f[midx];
    vaddr mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
    if (mask < f->mask || len > f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  "%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n",
                  midx, addr, mask, len);
        tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
        return;
    }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  "%016" VADDR_PRIx "/%016" VADDR_PRIx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
        return;
    }

    for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
        vaddr page = addr + i;
        CPUTLBEntry *entry = tlb_entry(cpu, midx, page);

        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
            tlb_n_used_entries_dec(cpu, midx);
        }
        tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask);
    }
}

typedef struct {
    vaddr addr;
    vaddr len;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushRangeData;

static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
                                              TLBFlushRangeData d)
{
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("range: %016" VADDR_PRIx "/%u+%016" VADDR_PRIx " mmu_map:0x%x\n",
              d.addr, d.bits, d.len, d.idxmap);

    qemu_spin_lock(&cpu->neg.tlb.c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_range_locked(cpu, mmu_idx, d.addr, d.len, d.bits);
        }
    }
    qemu_spin_unlock(&cpu->neg.tlb.c.lock);

    /*
     * If the length is larger than the jump cache size, then it will take
     * longer to clear each entry individually than it will to clear it all.
     */
    if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
        tcg_flush_jmp_cache(cpu);
        return;
    }

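    /*
     * For scale (illustrative, assuming 4KiB pages and a 4096-entry jump
     * cache): ranges of 16MiB or more take the full-flush path above, while
     * smaller ranges fall through to the per-page loop below.
     */
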
    /*
     * Discard jump cache entries for any tb which might potentially
     * overlap the flushed pages, which includes the previous.
     */
    d.addr -= TARGET_PAGE_SIZE;
    for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
        tb_jmp_cache_clear_page(cpu, d.addr);
        d.addr += TARGET_PAGE_SIZE;
    }
}

static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
                                              run_on_cpu_data data)
{
    TLBFlushRangeData *d = data.host_ptr;
    tlb_flush_range_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}

void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                               vaddr len, uint16_t idxmap,
                               unsigned bits)
{
    TLBFlushRangeData d;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_range_by_mmuidx_async_0(cpu, d);
    } else {
        /* Otherwise allocate a structure, freed by the worker.  */
        TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
        async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
                         RUN_ON_CPU_HOST_PTR(p));
    }
}

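/*
 * Illustrative call (arguments assumed, not from this file):
 *     tlb_flush_range_by_mmuidx(cs, addr, 0x10000, idxmap, 16);
 * flushes a 64KiB range while matching only the low 16 address bits, so
 * tlb_flush_range_locked walks at most 16 pages (at 4KiB each) and falls
 * back to a full per-mmuidx flush when the 16-bit mask is narrower than
 * the TLB's index mask or the length exceeds the TLB size.
 */
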
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
                                   uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
                                        vaddr addr, vaddr len,
                                        uint16_t idxmap, unsigned bits)
{
    TLBFlushRangeData d;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu,
                             tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
}

void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                            vaddr addr, uint16_t idxmap,
                                            unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
                                       idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               vaddr addr,
                                               vaddr len,
                                               uint16_t idxmap,
                                               unsigned bits)
{
    TLBFlushRangeData d, *p;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu.  */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    p = g_memdup(&d, sizeof(d));
    async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
                          RUN_ON_CPU_HOST_PTR(p));
}

void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   vaddr addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
                                              idxmap, bits);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
                                             TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address.  If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with qatomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TARGET_LONG_BITS == 32
            uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
            ptr_write += HOST_BIG_ENDIAN;
            qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
#elif TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            qatomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    int mmu_idx;

    qemu_spin_lock(&cpu->neg.tlb.c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&cpu->neg.tlb.c.lock);
}

/* Called with tlb_c.lock held */
Cota static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry, 1033732d5487SAnton Johansson vaddr addr) 1034d9bb58e5SYang Zhong { 1035732d5487SAnton Johansson if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) { 1036732d5487SAnton Johansson tlb_entry->addr_write = addr; 1037d9bb58e5SYang Zhong } 1038d9bb58e5SYang Zhong } 1039d9bb58e5SYang Zhong 1040d9bb58e5SYang Zhong /* update the TLB corresponding to virtual page vaddr 1041d9bb58e5SYang Zhong so that it is no longer dirty */ 1042*51579d40SPhilippe Mathieu-Daudé static void tlb_set_dirty(CPUState *cpu, vaddr addr) 1043d9bb58e5SYang Zhong { 1044d9bb58e5SYang Zhong int mmu_idx; 1045d9bb58e5SYang Zhong 1046d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 1047d9bb58e5SYang Zhong 1048732d5487SAnton Johansson addr &= TARGET_PAGE_MASK; 104910b32e2cSAnton Johansson qemu_spin_lock(&cpu->neg.tlb.c.lock); 1050d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 105110b32e2cSAnton Johansson tlb_set_dirty1_locked(tlb_entry(cpu, mmu_idx, addr), addr); 1052d9bb58e5SYang Zhong } 1053d9bb58e5SYang Zhong 1054d9bb58e5SYang Zhong for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 1055d9bb58e5SYang Zhong int k; 1056d9bb58e5SYang Zhong for (k = 0; k < CPU_VTLB_SIZE; k++) { 105710b32e2cSAnton Johansson tlb_set_dirty1_locked(&cpu->neg.tlb.d[mmu_idx].vtable[k], addr); 1058d9bb58e5SYang Zhong } 1059d9bb58e5SYang Zhong } 106010b32e2cSAnton Johansson qemu_spin_unlock(&cpu->neg.tlb.c.lock); 1061d9bb58e5SYang Zhong } 1062d9bb58e5SYang Zhong 1063d9bb58e5SYang Zhong /* Our TLB does not support large pages, so remember the area covered by 1064d9bb58e5SYang Zhong large pages and trigger a full TLB flush if these are invalidated. */ 106510b32e2cSAnton Johansson static void tlb_add_large_page(CPUState *cpu, int mmu_idx, 1066732d5487SAnton Johansson vaddr addr, uint64_t size) 1067d9bb58e5SYang Zhong { 106810b32e2cSAnton Johansson vaddr lp_addr = cpu->neg.tlb.d[mmu_idx].large_page_addr; 1069732d5487SAnton Johansson vaddr lp_mask = ~(size - 1); 1070d9bb58e5SYang Zhong 1071732d5487SAnton Johansson if (lp_addr == (vaddr)-1) { 10721308e026SRichard Henderson /* No previous large page. */ 1073732d5487SAnton Johansson lp_addr = addr; 10741308e026SRichard Henderson } else { 1075d9bb58e5SYang Zhong /* Extend the existing region to include the new page. 10761308e026SRichard Henderson This is a compromise between unnecessary flushes and 10771308e026SRichard Henderson the cost of maintaining a full variable size TLB. 
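           For example (illustrative numbers): if a 1MB page was previously
           recorded at 0x40000000 (large_page_mask == ~0xfffff) and another
           1MB page is added at 0x40230000, the loop below widens lp_mask to
           ~0x3fffff, so the recorded region becomes the 4MB block at
           0x40000000 that covers both pages; invalidating any page inside
           that region then costs a full flush.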
*/ 107810b32e2cSAnton Johansson lp_mask &= cpu->neg.tlb.d[mmu_idx].large_page_mask; 1079732d5487SAnton Johansson while (((lp_addr ^ addr) & lp_mask) != 0) { 10801308e026SRichard Henderson lp_mask <<= 1; 1081d9bb58e5SYang Zhong } 10821308e026SRichard Henderson } 108310b32e2cSAnton Johansson cpu->neg.tlb.d[mmu_idx].large_page_addr = lp_addr & lp_mask; 108410b32e2cSAnton Johansson cpu->neg.tlb.d[mmu_idx].large_page_mask = lp_mask; 1085d9bb58e5SYang Zhong } 1086d9bb58e5SYang Zhong 108758e8f1f6SRichard Henderson static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent, 1088d712b116SAnton Johansson vaddr address, int flags, 108958e8f1f6SRichard Henderson MMUAccessType access_type, bool enable) 109058e8f1f6SRichard Henderson { 109158e8f1f6SRichard Henderson if (enable) { 109258e8f1f6SRichard Henderson address |= flags & TLB_FLAGS_MASK; 109358e8f1f6SRichard Henderson flags &= TLB_SLOW_FLAGS_MASK; 109458e8f1f6SRichard Henderson if (flags) { 109558e8f1f6SRichard Henderson address |= TLB_FORCE_SLOW; 109658e8f1f6SRichard Henderson } 109758e8f1f6SRichard Henderson } else { 109858e8f1f6SRichard Henderson address = -1; 109958e8f1f6SRichard Henderson flags = 0; 110058e8f1f6SRichard Henderson } 110158e8f1f6SRichard Henderson ent->addr_idx[access_type] = address; 110258e8f1f6SRichard Henderson full->slow_flags[access_type] = flags; 110358e8f1f6SRichard Henderson } 110458e8f1f6SRichard Henderson 110540473689SRichard Henderson /* 110640473689SRichard Henderson * Add a new TLB entry. At most one entry for a given virtual address 1107d9bb58e5SYang Zhong * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the 1108d9bb58e5SYang Zhong * supplied size is only used by tlb_flush_page. 1109d9bb58e5SYang Zhong * 1110d9bb58e5SYang Zhong * Called from TCG-generated code, which is under an RCU read-side 1111d9bb58e5SYang Zhong * critical section. 
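 *
 * Illustrative sketch with hypothetical values: a target's tlb_fill hook
 * might install a normal RAM page roughly as
 *
 *     CPUTLBEntryFull full = {
 *         .phys_addr = paddr,
 *         .attrs = MEMTXATTRS_UNSPECIFIED,
 *         .prot = PAGE_READ | PAGE_WRITE,
 *         .lg_page_size = TARGET_PAGE_BITS,
 *     };
 *     tlb_set_page_full(cpu, mmu_idx, addr, &full);
 *
 * which is what tlb_set_page_with_attrs() below constructs from its
 * arguments.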
1112d9bb58e5SYang Zhong */ 111340473689SRichard Henderson void tlb_set_page_full(CPUState *cpu, int mmu_idx, 1114732d5487SAnton Johansson vaddr addr, CPUTLBEntryFull *full) 1115d9bb58e5SYang Zhong { 111610b32e2cSAnton Johansson CPUTLB *tlb = &cpu->neg.tlb; 1117a40ec84eSRichard Henderson CPUTLBDesc *desc = &tlb->d[mmu_idx]; 1118d9bb58e5SYang Zhong MemoryRegionSection *section; 111958e8f1f6SRichard Henderson unsigned int index, read_flags, write_flags; 1120d9bb58e5SYang Zhong uintptr_t addend; 112168fea038SRichard Henderson CPUTLBEntry *te, tn; 112255df6fcfSPeter Maydell hwaddr iotlb, xlat, sz, paddr_page; 1123732d5487SAnton Johansson vaddr addr_page; 112440473689SRichard Henderson int asidx, wp_flags, prot; 11258f5db641SRichard Henderson bool is_ram, is_romd; 1126d9bb58e5SYang Zhong 1127d9bb58e5SYang Zhong assert_cpu_is_self(cpu); 112855df6fcfSPeter Maydell 112940473689SRichard Henderson if (full->lg_page_size <= TARGET_PAGE_BITS) { 113055df6fcfSPeter Maydell sz = TARGET_PAGE_SIZE; 113155df6fcfSPeter Maydell } else { 113240473689SRichard Henderson sz = (hwaddr)1 << full->lg_page_size; 113310b32e2cSAnton Johansson tlb_add_large_page(cpu, mmu_idx, addr, sz); 113455df6fcfSPeter Maydell } 1135732d5487SAnton Johansson addr_page = addr & TARGET_PAGE_MASK; 113640473689SRichard Henderson paddr_page = full->phys_addr & TARGET_PAGE_MASK; 113755df6fcfSPeter Maydell 113840473689SRichard Henderson prot = full->prot; 113940473689SRichard Henderson asidx = cpu_asidx_from_attrs(cpu, full->attrs); 114055df6fcfSPeter Maydell section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, 114140473689SRichard Henderson &xlat, &sz, full->attrs, &prot); 1142d9bb58e5SYang Zhong assert(sz >= TARGET_PAGE_SIZE); 1143d9bb58e5SYang Zhong 11448c605cf1SAnton Johansson tlb_debug("vaddr=%016" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx 1145d9bb58e5SYang Zhong " prot=%x idx=%d\n", 1146732d5487SAnton Johansson addr, full->phys_addr, prot, mmu_idx); 1147d9bb58e5SYang Zhong 1148a0ff4a87SRichard Henderson read_flags = full->tlb_fill_flags; 114940473689SRichard Henderson if (full->lg_page_size < TARGET_PAGE_BITS) { 115030d7e098SRichard Henderson /* Repeat the MMU check and TLB fill on every access. */ 115158e8f1f6SRichard Henderson read_flags |= TLB_INVALID_MASK; 115255df6fcfSPeter Maydell } 11538f5db641SRichard Henderson 11548f5db641SRichard Henderson is_ram = memory_region_is_ram(section->mr); 11558f5db641SRichard Henderson is_romd = memory_region_is_romd(section->mr); 11568f5db641SRichard Henderson 11578f5db641SRichard Henderson if (is_ram || is_romd) { 11588f5db641SRichard Henderson /* RAM and ROMD both have associated host memory. */ 1159d9bb58e5SYang Zhong addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; 11608f5db641SRichard Henderson } else { 11618f5db641SRichard Henderson /* I/O does not; force the host address to NULL. */ 11628f5db641SRichard Henderson addend = 0; 1163d9bb58e5SYang Zhong } 1164d9bb58e5SYang Zhong 116558e8f1f6SRichard Henderson write_flags = read_flags; 11668f5db641SRichard Henderson if (is_ram) { 11678f5db641SRichard Henderson iotlb = memory_region_get_ram_addr(section->mr) + xlat; 1168dff1ab68SLIU Zhiwei assert(!(iotlb & ~TARGET_PAGE_MASK)); 11698f5db641SRichard Henderson /* 11708f5db641SRichard Henderson * Computing is_clean is expensive; avoid all that unless 11718f5db641SRichard Henderson * the page is actually writable. 
11728f5db641SRichard Henderson */ 11738f5db641SRichard Henderson if (prot & PAGE_WRITE) { 11748f5db641SRichard Henderson if (section->readonly) { 117558e8f1f6SRichard Henderson write_flags |= TLB_DISCARD_WRITE; 11768f5db641SRichard Henderson } else if (cpu_physical_memory_is_clean(iotlb)) { 117758e8f1f6SRichard Henderson write_flags |= TLB_NOTDIRTY; 11788f5db641SRichard Henderson } 11798f5db641SRichard Henderson } 11808f5db641SRichard Henderson } else { 11818f5db641SRichard Henderson /* I/O or ROMD */ 11828f5db641SRichard Henderson iotlb = memory_region_section_get_iotlb(cpu, section) + xlat; 11838f5db641SRichard Henderson /* 11848f5db641SRichard Henderson * Writes to romd devices must go through MMIO to enable write. 11858f5db641SRichard Henderson * Reads to romd devices go through the ram_ptr found above, 11868f5db641SRichard Henderson * but of course reads to I/O must go through MMIO. 11878f5db641SRichard Henderson */ 118858e8f1f6SRichard Henderson write_flags |= TLB_MMIO; 11898f5db641SRichard Henderson if (!is_romd) { 119058e8f1f6SRichard Henderson read_flags = write_flags; 11918f5db641SRichard Henderson } 11928f5db641SRichard Henderson } 11938f5db641SRichard Henderson 1194732d5487SAnton Johansson wp_flags = cpu_watchpoint_address_matches(cpu, addr_page, 119550b107c5SRichard Henderson TARGET_PAGE_SIZE); 1196d9bb58e5SYang Zhong 119710b32e2cSAnton Johansson index = tlb_index(cpu, mmu_idx, addr_page); 119810b32e2cSAnton Johansson te = tlb_entry(cpu, mmu_idx, addr_page); 1199d9bb58e5SYang Zhong 120068fea038SRichard Henderson /* 120171aec354SEmilio G. Cota * Hold the TLB lock for the rest of the function. We could acquire/release 120271aec354SEmilio G. Cota * the lock several times in the function, but it is faster to amortize the 120371aec354SEmilio G. Cota * acquisition cost by acquiring it just once. Note that this leads to 120471aec354SEmilio G. Cota * a longer critical section, but this is not a concern since the TLB lock 120571aec354SEmilio G. Cota * is unlikely to be contended. 120671aec354SEmilio G. Cota */ 1207a40ec84eSRichard Henderson qemu_spin_lock(&tlb->c.lock); 120871aec354SEmilio G. Cota 12093d1523ceSRichard Henderson /* Note that the tlb is no longer clean. */ 1210a40ec84eSRichard Henderson tlb->c.dirty |= 1 << mmu_idx; 12113d1523ceSRichard Henderson 121271aec354SEmilio G. Cota /* Make sure there's no cached translation for the new page. */ 121310b32e2cSAnton Johansson tlb_flush_vtlb_page_locked(cpu, mmu_idx, addr_page); 121471aec354SEmilio G. Cota 121571aec354SEmilio G. Cota /* 121668fea038SRichard Henderson * Only evict the old entry to the victim tlb if it's for a 121768fea038SRichard Henderson * different page; otherwise just overwrite the stale data. 121868fea038SRichard Henderson */ 1219732d5487SAnton Johansson if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) { 1220a40ec84eSRichard Henderson unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; 1221a40ec84eSRichard Henderson CPUTLBEntry *tv = &desc->vtable[vidx]; 122268fea038SRichard Henderson 122368fea038SRichard Henderson /* Evict the old entry into the victim tlb. */ 122471aec354SEmilio G. 
Cota copy_tlb_helper_locked(tv, te); 122525d3ec58SRichard Henderson desc->vfulltlb[vidx] = desc->fulltlb[index]; 122610b32e2cSAnton Johansson tlb_n_used_entries_dec(cpu, mmu_idx); 122768fea038SRichard Henderson } 1228d9bb58e5SYang Zhong 1229d9bb58e5SYang Zhong /* refill the tlb */ 1230ace41090SPeter Maydell /* 1231dff1ab68SLIU Zhiwei * When memory region is ram, iotlb contains a TARGET_PAGE_BITS 1232dff1ab68SLIU Zhiwei * aligned ram_addr_t of the page base of the target RAM. 1233dff1ab68SLIU Zhiwei * Otherwise, iotlb contains 1234dff1ab68SLIU Zhiwei * - a physical section number in the lower TARGET_PAGE_BITS 1235dff1ab68SLIU Zhiwei * - the offset within section->mr of the page base (I/O, ROMD) with the 1236dff1ab68SLIU Zhiwei * TARGET_PAGE_BITS masked off. 123758e8f1f6SRichard Henderson * We subtract addr_page (which is page aligned and thus won't 1238ace41090SPeter Maydell * disturb the low bits) to give an offset which can be added to the 1239ace41090SPeter Maydell * (non-page-aligned) vaddr of the eventual memory access to get 1240ace41090SPeter Maydell * the MemoryRegion offset for the access. Note that the vaddr we 1241ace41090SPeter Maydell * subtract here is that of the page base, and not the same as the 1242fb3cb376SRichard Henderson * vaddr we add back in io_prepare()/get_page_addr_code(). 1243ace41090SPeter Maydell */ 124440473689SRichard Henderson desc->fulltlb[index] = *full; 124558e8f1f6SRichard Henderson full = &desc->fulltlb[index]; 124658e8f1f6SRichard Henderson full->xlat_section = iotlb - addr_page; 124758e8f1f6SRichard Henderson full->phys_addr = paddr_page; 1248d9bb58e5SYang Zhong 1249d9bb58e5SYang Zhong /* Now calculate the new entry */ 1250732d5487SAnton Johansson tn.addend = addend - addr_page; 125158e8f1f6SRichard Henderson 125258e8f1f6SRichard Henderson tlb_set_compare(full, &tn, addr_page, read_flags, 125358e8f1f6SRichard Henderson MMU_INST_FETCH, prot & PAGE_EXEC); 125458e8f1f6SRichard Henderson 125550b107c5SRichard Henderson if (wp_flags & BP_MEM_READ) { 125658e8f1f6SRichard Henderson read_flags |= TLB_WATCHPOINT; 125750b107c5SRichard Henderson } 125858e8f1f6SRichard Henderson tlb_set_compare(full, &tn, addr_page, read_flags, 125958e8f1f6SRichard Henderson MMU_DATA_LOAD, prot & PAGE_READ); 1260d9bb58e5SYang Zhong 1261f52bfb12SDavid Hildenbrand if (prot & PAGE_WRITE_INV) { 126258e8f1f6SRichard Henderson write_flags |= TLB_INVALID_MASK; 1263f52bfb12SDavid Hildenbrand } 126450b107c5SRichard Henderson if (wp_flags & BP_MEM_WRITE) { 126558e8f1f6SRichard Henderson write_flags |= TLB_WATCHPOINT; 126650b107c5SRichard Henderson } 126758e8f1f6SRichard Henderson tlb_set_compare(full, &tn, addr_page, write_flags, 126858e8f1f6SRichard Henderson MMU_DATA_STORE, prot & PAGE_WRITE); 1269d9bb58e5SYang Zhong 127071aec354SEmilio G. 
Cota copy_tlb_helper_locked(te, &tn); 127110b32e2cSAnton Johansson tlb_n_used_entries_inc(cpu, mmu_idx); 1272a40ec84eSRichard Henderson qemu_spin_unlock(&tlb->c.lock); 1273d9bb58e5SYang Zhong } 1274d9bb58e5SYang Zhong 1275732d5487SAnton Johansson void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr, 127640473689SRichard Henderson hwaddr paddr, MemTxAttrs attrs, int prot, 1277732d5487SAnton Johansson int mmu_idx, uint64_t size) 127840473689SRichard Henderson { 127940473689SRichard Henderson CPUTLBEntryFull full = { 128040473689SRichard Henderson .phys_addr = paddr, 128140473689SRichard Henderson .attrs = attrs, 128240473689SRichard Henderson .prot = prot, 128340473689SRichard Henderson .lg_page_size = ctz64(size) 128440473689SRichard Henderson }; 128540473689SRichard Henderson 128640473689SRichard Henderson assert(is_power_of_2(size)); 1287732d5487SAnton Johansson tlb_set_page_full(cpu, mmu_idx, addr, &full); 128840473689SRichard Henderson } 128940473689SRichard Henderson 1290732d5487SAnton Johansson void tlb_set_page(CPUState *cpu, vaddr addr, 1291d9bb58e5SYang Zhong hwaddr paddr, int prot, 1292732d5487SAnton Johansson int mmu_idx, uint64_t size) 1293d9bb58e5SYang Zhong { 1294732d5487SAnton Johansson tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED, 1295d9bb58e5SYang Zhong prot, mmu_idx, size); 1296d9bb58e5SYang Zhong } 1297d9bb58e5SYang Zhong 1298c319dc13SRichard Henderson /* 1299c319dc13SRichard Henderson * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the 1300c319dc13SRichard Henderson * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must 1301c319dc13SRichard Henderson * be discarded and looked up again (e.g. via tlb_entry()). 1302c319dc13SRichard Henderson */ 1303732d5487SAnton Johansson static void tlb_fill(CPUState *cpu, vaddr addr, int size, 1304c319dc13SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1305c319dc13SRichard Henderson { 1306c319dc13SRichard Henderson bool ok; 1307c319dc13SRichard Henderson 1308c319dc13SRichard Henderson /* 1309c319dc13SRichard Henderson * This is not a probe, so only valid return is success; failure 1310c319dc13SRichard Henderson * should result in exception + longjmp to the cpu loop. 
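 *
 * Illustrative note: the target hook invoked below is expected either to
 * resolve the translation, install it (for instance via tlb_set_page_full())
 * and return true, or, since the probe argument is false here, to raise the
 * guest fault and not return at all.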
1311c319dc13SRichard Henderson */ 13128810ee2aSAlex Bennée ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size, 1313e124536fSEduardo Habkost access_type, mmu_idx, false, retaddr); 1314c319dc13SRichard Henderson assert(ok); 1315c319dc13SRichard Henderson } 1316c319dc13SRichard Henderson 131778271684SClaudio Fontana static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, 131878271684SClaudio Fontana MMUAccessType access_type, 131978271684SClaudio Fontana int mmu_idx, uintptr_t retaddr) 132078271684SClaudio Fontana { 13218810ee2aSAlex Bennée cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, 13228810ee2aSAlex Bennée mmu_idx, retaddr); 132378271684SClaudio Fontana } 132478271684SClaudio Fontana 1325fb3cb376SRichard Henderson static MemoryRegionSection * 1326d50ef446SAnton Johansson io_prepare(hwaddr *out_offset, CPUState *cpu, hwaddr xlat, 1327fb3cb376SRichard Henderson MemTxAttrs attrs, vaddr addr, uintptr_t retaddr) 1328d9bb58e5SYang Zhong { 13292d54f194SPeter Maydell MemoryRegionSection *section; 1330fb3cb376SRichard Henderson hwaddr mr_offset; 1331d9bb58e5SYang Zhong 1332fb3cb376SRichard Henderson section = iotlb_to_section(cpu, xlat, attrs); 1333fb3cb376SRichard Henderson mr_offset = (xlat & TARGET_PAGE_MASK) + addr; 1334d9bb58e5SYang Zhong cpu->mem_io_pc = retaddr; 1335464dacf6SRichard Henderson if (!cpu->neg.can_do_io) { 1336d9bb58e5SYang Zhong cpu_io_recompile(cpu, retaddr); 1337d9bb58e5SYang Zhong } 1338d9bb58e5SYang Zhong 1339fb3cb376SRichard Henderson *out_offset = mr_offset; 1340fb3cb376SRichard Henderson return section; 1341fb3cb376SRichard Henderson } 1342fb3cb376SRichard Henderson 1343d50ef446SAnton Johansson static void io_failed(CPUState *cpu, CPUTLBEntryFull *full, vaddr addr, 1344fb3cb376SRichard Henderson unsigned size, MMUAccessType access_type, int mmu_idx, 13450e114440SRichard Henderson MemTxResult response, uintptr_t retaddr) 1346fb3cb376SRichard Henderson { 1347d50ef446SAnton Johansson if (!cpu->ignore_memory_transaction_failures 1348d50ef446SAnton Johansson && cpu->cc->tcg_ops->do_transaction_failed) { 13490e114440SRichard Henderson hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK); 1350bef0c216SRichard Henderson 1351d50ef446SAnton Johansson cpu->cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size, 1352bef0c216SRichard Henderson access_type, mmu_idx, 1353bef0c216SRichard Henderson full->attrs, response, retaddr); 1354bef0c216SRichard Henderson } 1355bef0c216SRichard Henderson } 1356fb3cb376SRichard Henderson 1357d9bb58e5SYang Zhong /* Return true if ADDR is present in the victim tlb, and has been copied 1358d9bb58e5SYang Zhong back to the main tlb. */ 135910b32e2cSAnton Johansson static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index, 1360732d5487SAnton Johansson MMUAccessType access_type, vaddr page) 1361d9bb58e5SYang Zhong { 1362d9bb58e5SYang Zhong size_t vidx; 136371aec354SEmilio G. Cota 136410b32e2cSAnton Johansson assert_cpu_is_self(cpu); 1365d9bb58e5SYang Zhong for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { 136610b32e2cSAnton Johansson CPUTLBEntry *vtlb = &cpu->neg.tlb.d[mmu_idx].vtable[vidx]; 13679e39de98SAnton Johansson uint64_t cmp = tlb_read_idx(vtlb, access_type); 1368d9bb58e5SYang Zhong 1369d9bb58e5SYang Zhong if (cmp == page) { 1370d9bb58e5SYang Zhong /* Found entry in victim tlb, swap tlb and iotlb. 
*/ 137110b32e2cSAnton Johansson CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index]; 1372d9bb58e5SYang Zhong 137310b32e2cSAnton Johansson qemu_spin_lock(&cpu->neg.tlb.c.lock); 137471aec354SEmilio G. Cota copy_tlb_helper_locked(&tmptlb, tlb); 137571aec354SEmilio G. Cota copy_tlb_helper_locked(tlb, vtlb); 137671aec354SEmilio G. Cota copy_tlb_helper_locked(vtlb, &tmptlb); 137710b32e2cSAnton Johansson qemu_spin_unlock(&cpu->neg.tlb.c.lock); 1378d9bb58e5SYang Zhong 137910b32e2cSAnton Johansson CPUTLBEntryFull *f1 = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; 138010b32e2cSAnton Johansson CPUTLBEntryFull *f2 = &cpu->neg.tlb.d[mmu_idx].vfulltlb[vidx]; 138125d3ec58SRichard Henderson CPUTLBEntryFull tmpf; 138225d3ec58SRichard Henderson tmpf = *f1; *f1 = *f2; *f2 = tmpf; 1383d9bb58e5SYang Zhong return true; 1384d9bb58e5SYang Zhong } 1385d9bb58e5SYang Zhong } 1386d9bb58e5SYang Zhong return false; 1387d9bb58e5SYang Zhong } 1388d9bb58e5SYang Zhong 1389707526adSRichard Henderson static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, 139025d3ec58SRichard Henderson CPUTLBEntryFull *full, uintptr_t retaddr) 1391707526adSRichard Henderson { 139225d3ec58SRichard Henderson ram_addr_t ram_addr = mem_vaddr + full->xlat_section; 1393707526adSRichard Henderson 1394707526adSRichard Henderson trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); 1395707526adSRichard Henderson 1396707526adSRichard Henderson if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { 1397f349e92eSPhilippe Mathieu-Daudé tb_invalidate_phys_range_fast(ram_addr, size, retaddr); 1398707526adSRichard Henderson } 1399707526adSRichard Henderson 1400707526adSRichard Henderson /* 1401707526adSRichard Henderson * Set both VGA and migration bits for simplicity and to remove 1402707526adSRichard Henderson * the notdirty callback faster. 1403707526adSRichard Henderson */ 1404707526adSRichard Henderson cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); 1405707526adSRichard Henderson 1406707526adSRichard Henderson /* We remove the notdirty callback only if the code has been flushed. 
*/ 1407707526adSRichard Henderson if (!cpu_physical_memory_is_clean(ram_addr)) { 1408707526adSRichard Henderson trace_memory_notdirty_set_dirty(mem_vaddr); 1409707526adSRichard Henderson tlb_set_dirty(cpu, mem_vaddr); 1410707526adSRichard Henderson } 1411707526adSRichard Henderson } 1412707526adSRichard Henderson 14135afec1c6SAnton Johansson static int probe_access_internal(CPUState *cpu, vaddr addr, 1414069cfe77SRichard Henderson int fault_size, MMUAccessType access_type, 1415069cfe77SRichard Henderson int mmu_idx, bool nonfault, 1416af803a4fSRichard Henderson void **phost, CPUTLBEntryFull **pfull, 14176d03226bSAlex Bennée uintptr_t retaddr, bool check_mem_cbs) 1418d9bb58e5SYang Zhong { 14195afec1c6SAnton Johansson uintptr_t index = tlb_index(cpu, mmu_idx, addr); 14205afec1c6SAnton Johansson CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr); 14219e39de98SAnton Johansson uint64_t tlb_addr = tlb_read_idx(entry, access_type); 14224f8f4127SAnton Johansson vaddr page_addr = addr & TARGET_PAGE_MASK; 142358e8f1f6SRichard Henderson int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW; 14245afec1c6SAnton Johansson bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(cpu); 142558e8f1f6SRichard Henderson CPUTLBEntryFull *full; 1426ca86cf32SDavid Hildenbrand 1427069cfe77SRichard Henderson if (!tlb_hit_page(tlb_addr, page_addr)) { 14285afec1c6SAnton Johansson if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) { 14295afec1c6SAnton Johansson if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type, 1430069cfe77SRichard Henderson mmu_idx, nonfault, retaddr)) { 1431069cfe77SRichard Henderson /* Non-faulting page table read failed. */ 1432069cfe77SRichard Henderson *phost = NULL; 1433af803a4fSRichard Henderson *pfull = NULL; 1434069cfe77SRichard Henderson return TLB_INVALID_MASK; 1435069cfe77SRichard Henderson } 1436069cfe77SRichard Henderson 143703a98189SDavid Hildenbrand /* TLB resize via tlb_fill may have moved the entry. */ 14385afec1c6SAnton Johansson index = tlb_index(cpu, mmu_idx, addr); 14395afec1c6SAnton Johansson entry = tlb_entry(cpu, mmu_idx, addr); 1440c3c8bf57SRichard Henderson 1441c3c8bf57SRichard Henderson /* 1442c3c8bf57SRichard Henderson * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately, 1443c3c8bf57SRichard Henderson * to force the next access through tlb_fill. We've just 1444c3c8bf57SRichard Henderson * called tlb_fill, so we know that this entry *is* valid. 1445c3c8bf57SRichard Henderson */ 1446c3c8bf57SRichard Henderson flags &= ~TLB_INVALID_MASK; 1447d9bb58e5SYang Zhong } 14480b3c75adSRichard Henderson tlb_addr = tlb_read_idx(entry, access_type); 144903a98189SDavid Hildenbrand } 1450c3c8bf57SRichard Henderson flags &= tlb_addr; 145103a98189SDavid Hildenbrand 14525afec1c6SAnton Johansson *pfull = full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; 145358e8f1f6SRichard Henderson flags |= full->slow_flags[access_type]; 1454af803a4fSRichard Henderson 1455069cfe77SRichard Henderson /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ 145649fa457cSRichard Henderson if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY | TLB_CHECK_ALIGNED)) 145749fa457cSRichard Henderson || (access_type != MMU_INST_FETCH && force_mmio)) { 1458069cfe77SRichard Henderson *phost = NULL; 1459069cfe77SRichard Henderson return TLB_MMIO; 1460fef39ccdSDavid Hildenbrand } 1461fef39ccdSDavid Hildenbrand 1462069cfe77SRichard Henderson /* Everything else is RAM. 
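       The returned flags may still include TLB_WATCHPOINT or TLB_NOTDIRTY;
       callers such as probe_access() below handle those by calling
       cpu_check_watchpoint() and notdirty_write() before using the host
       pointer computed here.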
*/ 1463069cfe77SRichard Henderson *phost = (void *)((uintptr_t)addr + entry->addend); 1464069cfe77SRichard Henderson return flags; 1465069cfe77SRichard Henderson } 1466069cfe77SRichard Henderson 14674f8f4127SAnton Johansson int probe_access_full(CPUArchState *env, vaddr addr, int size, 1468069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, 1469af803a4fSRichard Henderson bool nonfault, void **phost, CPUTLBEntryFull **pfull, 1470af803a4fSRichard Henderson uintptr_t retaddr) 1471069cfe77SRichard Henderson { 14725afec1c6SAnton Johansson int flags = probe_access_internal(env_cpu(env), addr, size, access_type, 14735afec1c6SAnton Johansson mmu_idx, nonfault, phost, pfull, retaddr, 14745afec1c6SAnton Johansson true); 1475069cfe77SRichard Henderson 1476069cfe77SRichard Henderson /* Handle clean RAM pages. */ 1477069cfe77SRichard Henderson if (unlikely(flags & TLB_NOTDIRTY)) { 1478e2faabeeSJessica Clarke int dirtysize = size == 0 ? 1 : size; 1479e2faabeeSJessica Clarke notdirty_write(env_cpu(env), addr, dirtysize, *pfull, retaddr); 1480069cfe77SRichard Henderson flags &= ~TLB_NOTDIRTY; 1481069cfe77SRichard Henderson } 1482069cfe77SRichard Henderson 1483069cfe77SRichard Henderson return flags; 1484069cfe77SRichard Henderson } 1485069cfe77SRichard Henderson 14866d03226bSAlex Bennée int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size, 14876d03226bSAlex Bennée MMUAccessType access_type, int mmu_idx, 14886d03226bSAlex Bennée void **phost, CPUTLBEntryFull **pfull) 14896d03226bSAlex Bennée { 14906d03226bSAlex Bennée void *discard_phost; 14916d03226bSAlex Bennée CPUTLBEntryFull *discard_tlb; 14926d03226bSAlex Bennée 14936d03226bSAlex Bennée /* privately handle users that don't need full results */ 14946d03226bSAlex Bennée phost = phost ? phost : &discard_phost; 14956d03226bSAlex Bennée pfull = pfull ? pfull : &discard_tlb; 14966d03226bSAlex Bennée 14975afec1c6SAnton Johansson int flags = probe_access_internal(env_cpu(env), addr, size, access_type, 14985afec1c6SAnton Johansson mmu_idx, true, phost, pfull, 0, false); 14996d03226bSAlex Bennée 15006d03226bSAlex Bennée /* Handle clean RAM pages. */ 15016d03226bSAlex Bennée if (unlikely(flags & TLB_NOTDIRTY)) { 1502e2faabeeSJessica Clarke int dirtysize = size == 0 ? 1 : size; 1503e2faabeeSJessica Clarke notdirty_write(env_cpu(env), addr, dirtysize, *pfull, 0); 15046d03226bSAlex Bennée flags &= ~TLB_NOTDIRTY; 15056d03226bSAlex Bennée } 15066d03226bSAlex Bennée 15076d03226bSAlex Bennée return flags; 15086d03226bSAlex Bennée } 15096d03226bSAlex Bennée 15104f8f4127SAnton Johansson int probe_access_flags(CPUArchState *env, vaddr addr, int size, 1511af803a4fSRichard Henderson MMUAccessType access_type, int mmu_idx, 1512af803a4fSRichard Henderson bool nonfault, void **phost, uintptr_t retaddr) 1513af803a4fSRichard Henderson { 1514af803a4fSRichard Henderson CPUTLBEntryFull *full; 15151770b2f2SDaniel Henrique Barboza int flags; 1516af803a4fSRichard Henderson 15171770b2f2SDaniel Henrique Barboza g_assert(-(addr | TARGET_PAGE_MASK) >= size); 15181770b2f2SDaniel Henrique Barboza 15195afec1c6SAnton Johansson flags = probe_access_internal(env_cpu(env), addr, size, access_type, 15205afec1c6SAnton Johansson mmu_idx, nonfault, phost, &full, retaddr, 15215afec1c6SAnton Johansson true); 15221770b2f2SDaniel Henrique Barboza 15231770b2f2SDaniel Henrique Barboza /* Handle clean RAM pages. */ 15241770b2f2SDaniel Henrique Barboza if (unlikely(flags & TLB_NOTDIRTY)) { 1525e2faabeeSJessica Clarke int dirtysize = size == 0 ? 
1 : size; 1526e2faabeeSJessica Clarke notdirty_write(env_cpu(env), addr, dirtysize, full, retaddr); 15271770b2f2SDaniel Henrique Barboza flags &= ~TLB_NOTDIRTY; 15281770b2f2SDaniel Henrique Barboza } 15291770b2f2SDaniel Henrique Barboza 15301770b2f2SDaniel Henrique Barboza return flags; 1531af803a4fSRichard Henderson } 1532af803a4fSRichard Henderson 15334f8f4127SAnton Johansson void *probe_access(CPUArchState *env, vaddr addr, int size, 1534069cfe77SRichard Henderson MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 1535069cfe77SRichard Henderson { 1536af803a4fSRichard Henderson CPUTLBEntryFull *full; 1537069cfe77SRichard Henderson void *host; 1538069cfe77SRichard Henderson int flags; 1539069cfe77SRichard Henderson 1540069cfe77SRichard Henderson g_assert(-(addr | TARGET_PAGE_MASK) >= size); 1541069cfe77SRichard Henderson 15425afec1c6SAnton Johansson flags = probe_access_internal(env_cpu(env), addr, size, access_type, 15435afec1c6SAnton Johansson mmu_idx, false, &host, &full, retaddr, 15445afec1c6SAnton Johansson true); 1545069cfe77SRichard Henderson 1546069cfe77SRichard Henderson /* Per the interface, size == 0 merely faults the access. */ 1547069cfe77SRichard Henderson if (size == 0) { 154873bc0bd4SRichard Henderson return NULL; 154973bc0bd4SRichard Henderson } 155073bc0bd4SRichard Henderson 1551069cfe77SRichard Henderson if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { 155203a98189SDavid Hildenbrand /* Handle watchpoints. */ 1553069cfe77SRichard Henderson if (flags & TLB_WATCHPOINT) { 1554069cfe77SRichard Henderson int wp_access = (access_type == MMU_DATA_STORE 1555069cfe77SRichard Henderson ? BP_MEM_WRITE : BP_MEM_READ); 155603a98189SDavid Hildenbrand cpu_check_watchpoint(env_cpu(env), addr, size, 155725d3ec58SRichard Henderson full->attrs, wp_access, retaddr); 1558d9bb58e5SYang Zhong } 1559fef39ccdSDavid Hildenbrand 156073bc0bd4SRichard Henderson /* Handle clean RAM pages. */ 1561069cfe77SRichard Henderson if (flags & TLB_NOTDIRTY) { 1562e2faabeeSJessica Clarke notdirty_write(env_cpu(env), addr, size, full, retaddr); 156373bc0bd4SRichard Henderson } 1564fef39ccdSDavid Hildenbrand } 1565fef39ccdSDavid Hildenbrand 1566069cfe77SRichard Henderson return host; 1567d9bb58e5SYang Zhong } 1568d9bb58e5SYang Zhong 15694811e909SRichard Henderson void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, 15704811e909SRichard Henderson MMUAccessType access_type, int mmu_idx) 15714811e909SRichard Henderson { 1572af803a4fSRichard Henderson CPUTLBEntryFull *full; 1573069cfe77SRichard Henderson void *host; 1574069cfe77SRichard Henderson int flags; 15754811e909SRichard Henderson 15765afec1c6SAnton Johansson flags = probe_access_internal(env_cpu(env), addr, 0, access_type, 15776d03226bSAlex Bennée mmu_idx, true, &host, &full, 0, false); 1578069cfe77SRichard Henderson 1579069cfe77SRichard Henderson /* No combination of flags are expected by the caller. */ 1580069cfe77SRichard Henderson return flags ? NULL : host; 15814811e909SRichard Henderson } 15824811e909SRichard Henderson 15837e0d9973SRichard Henderson /* 15847e0d9973SRichard Henderson * Return a ram_addr_t for the virtual address for execution. 15857e0d9973SRichard Henderson * 15867e0d9973SRichard Henderson * Return -1 if we can't translate and execute from an entire page 15877e0d9973SRichard Henderson * of RAM. This will force us to execute by loading and translating 15887e0d9973SRichard Henderson * one insn at a time, without caching. 
15897e0d9973SRichard Henderson * 15907e0d9973SRichard Henderson * NOTE: This function will trigger an exception if the page is 15917e0d9973SRichard Henderson * not executable. 15927e0d9973SRichard Henderson */ 15934f8f4127SAnton Johansson tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr, 15947e0d9973SRichard Henderson void **hostp) 15957e0d9973SRichard Henderson { 1596af803a4fSRichard Henderson CPUTLBEntryFull *full; 15977e0d9973SRichard Henderson void *p; 15987e0d9973SRichard Henderson 15995afec1c6SAnton Johansson (void)probe_access_internal(env_cpu(env), addr, 1, MMU_INST_FETCH, 16003b916140SRichard Henderson cpu_mmu_index(env_cpu(env), true), false, 16016d03226bSAlex Bennée &p, &full, 0, false); 16027e0d9973SRichard Henderson if (p == NULL) { 16037e0d9973SRichard Henderson return -1; 16047e0d9973SRichard Henderson } 1605ac01ec6fSWeiwei Li 1606ac01ec6fSWeiwei Li if (full->lg_page_size < TARGET_PAGE_BITS) { 1607ac01ec6fSWeiwei Li return -1; 1608ac01ec6fSWeiwei Li } 1609ac01ec6fSWeiwei Li 16107e0d9973SRichard Henderson if (hostp) { 16117e0d9973SRichard Henderson *hostp = p; 16127e0d9973SRichard Henderson } 16137e0d9973SRichard Henderson return qemu_ram_addr_from_host_nofail(p); 16147e0d9973SRichard Henderson } 16157e0d9973SRichard Henderson 1616cdfac37bSRichard Henderson /* Load/store with atomicity primitives. */ 1617cdfac37bSRichard Henderson #include "ldst_atomicity.c.inc" 1618cdfac37bSRichard Henderson 1619235537faSAlex Bennée #ifdef CONFIG_PLUGIN 1620235537faSAlex Bennée /* 1621235537faSAlex Bennée * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. 1622235537faSAlex Bennée * This should be a hot path as we will have just looked this path up 1623235537faSAlex Bennée * in the softmmu lookup code (or helper). We don't handle re-fills or 1624235537faSAlex Bennée * checking the victim table. This is purely informational. 1625235537faSAlex Bennée * 1626da6aef48SRichard Henderson * The one corner case is i/o write, which can cause changes to the 1627da6aef48SRichard Henderson * address space. Those changes, and the corresponding tlb flush, 1628da6aef48SRichard Henderson * should be delayed until the next TB, so even then this ought not fail. 1629da6aef48SRichard Henderson * But check, Just in Case. 1630235537faSAlex Bennée */ 1631732d5487SAnton Johansson bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx, 1632235537faSAlex Bennée bool is_store, struct qemu_plugin_hwaddr *data) 1633235537faSAlex Bennée { 163410b32e2cSAnton Johansson CPUTLBEntry *tlbe = tlb_entry(cpu, mmu_idx, addr); 163510b32e2cSAnton Johansson uintptr_t index = tlb_index(cpu, mmu_idx, addr); 1636da6aef48SRichard Henderson MMUAccessType access_type = is_store ? 
MMU_DATA_STORE : MMU_DATA_LOAD; 1637da6aef48SRichard Henderson uint64_t tlb_addr = tlb_read_idx(tlbe, access_type); 1638405c02d8SRichard Henderson CPUTLBEntryFull *full; 1639235537faSAlex Bennée 1640da6aef48SRichard Henderson if (unlikely(!tlb_hit(tlb_addr, addr))) { 1641da6aef48SRichard Henderson return false; 1642da6aef48SRichard Henderson } 1643da6aef48SRichard Henderson 164410b32e2cSAnton Johansson full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; 1645405c02d8SRichard Henderson data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK); 1646405c02d8SRichard Henderson 1647235537faSAlex Bennée /* We must have an iotlb entry for MMIO */ 1648235537faSAlex Bennée if (tlb_addr & TLB_MMIO) { 1649405c02d8SRichard Henderson MemoryRegionSection *section = 1650405c02d8SRichard Henderson iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK, 1651405c02d8SRichard Henderson full->attrs); 1652235537faSAlex Bennée data->is_io = true; 1653405c02d8SRichard Henderson data->mr = section->mr; 1654235537faSAlex Bennée } else { 1655235537faSAlex Bennée data->is_io = false; 1656405c02d8SRichard Henderson data->mr = NULL; 1657235537faSAlex Bennée } 1658235537faSAlex Bennée return true; 1659235537faSAlex Bennée } 1660235537faSAlex Bennée #endif 1661235537faSAlex Bennée 166208dff435SRichard Henderson /* 16638cfdacaaSRichard Henderson * Probe for a load/store operation. 16648cfdacaaSRichard Henderson * Return the host address and into @flags. 16658cfdacaaSRichard Henderson */ 16668cfdacaaSRichard Henderson 16678cfdacaaSRichard Henderson typedef struct MMULookupPageData { 16688cfdacaaSRichard Henderson CPUTLBEntryFull *full; 16698cfdacaaSRichard Henderson void *haddr; 1670fb2c53cbSAnton Johansson vaddr addr; 16718cfdacaaSRichard Henderson int flags; 16728cfdacaaSRichard Henderson int size; 16738cfdacaaSRichard Henderson } MMULookupPageData; 16748cfdacaaSRichard Henderson 16758cfdacaaSRichard Henderson typedef struct MMULookupLocals { 16768cfdacaaSRichard Henderson MMULookupPageData page[2]; 16778cfdacaaSRichard Henderson MemOp memop; 16788cfdacaaSRichard Henderson int mmu_idx; 16798cfdacaaSRichard Henderson } MMULookupLocals; 16808cfdacaaSRichard Henderson 16818cfdacaaSRichard Henderson /** 16828cfdacaaSRichard Henderson * mmu_lookup1: translate one page 1683d50ef446SAnton Johansson * @cpu: generic cpu state 16848cfdacaaSRichard Henderson * @data: lookup parameters 16858cfdacaaSRichard Henderson * @mmu_idx: virtual address context 16868cfdacaaSRichard Henderson * @access_type: load/store/code 16878cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 16888cfdacaaSRichard Henderson * 16898cfdacaaSRichard Henderson * Resolve the translation for the one page at @data.addr, filling in 16908cfdacaaSRichard Henderson * the rest of @data with the results. If the translation fails, 16918cfdacaaSRichard Henderson * tlb_fill will longjmp out. Return true if the softmmu tlb for 16928cfdacaaSRichard Henderson * @mmu_idx may have resized. 
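 *
 * "Resized" means that any previously cached CPUTLBEntry or CPUTLBEntryFull
 * pointer may now be stale; mmu_lookup() below re-derives page[0].full from
 * tlb_index() whenever the second page's lookup reports a possible resize.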
16938cfdacaaSRichard Henderson */ 1694d50ef446SAnton Johansson static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, 16958cfdacaaSRichard Henderson int mmu_idx, MMUAccessType access_type, uintptr_t ra) 16968cfdacaaSRichard Henderson { 1697fb2c53cbSAnton Johansson vaddr addr = data->addr; 1698d50ef446SAnton Johansson uintptr_t index = tlb_index(cpu, mmu_idx, addr); 1699d50ef446SAnton Johansson CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr); 17009e39de98SAnton Johansson uint64_t tlb_addr = tlb_read_idx(entry, access_type); 17018cfdacaaSRichard Henderson bool maybe_resized = false; 170258e8f1f6SRichard Henderson CPUTLBEntryFull *full; 170358e8f1f6SRichard Henderson int flags; 17048cfdacaaSRichard Henderson 17058cfdacaaSRichard Henderson /* If the TLB entry is for a different page, reload and try again. */ 17068cfdacaaSRichard Henderson if (!tlb_hit(tlb_addr, addr)) { 1707d50ef446SAnton Johansson if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, 17088cfdacaaSRichard Henderson addr & TARGET_PAGE_MASK)) { 1709d50ef446SAnton Johansson tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra); 17108cfdacaaSRichard Henderson maybe_resized = true; 1711d50ef446SAnton Johansson index = tlb_index(cpu, mmu_idx, addr); 1712d50ef446SAnton Johansson entry = tlb_entry(cpu, mmu_idx, addr); 17138cfdacaaSRichard Henderson } 17148cfdacaaSRichard Henderson tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK; 17158cfdacaaSRichard Henderson } 17168cfdacaaSRichard Henderson 1717d50ef446SAnton Johansson full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; 171858e8f1f6SRichard Henderson flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW); 171958e8f1f6SRichard Henderson flags |= full->slow_flags[access_type]; 172058e8f1f6SRichard Henderson 172158e8f1f6SRichard Henderson data->full = full; 172258e8f1f6SRichard Henderson data->flags = flags; 17238cfdacaaSRichard Henderson /* Compute haddr speculatively; depending on flags it might be invalid. */ 17248cfdacaaSRichard Henderson data->haddr = (void *)((uintptr_t)addr + entry->addend); 17258cfdacaaSRichard Henderson 17268cfdacaaSRichard Henderson return maybe_resized; 17278cfdacaaSRichard Henderson } 17288cfdacaaSRichard Henderson 17298cfdacaaSRichard Henderson /** 17308cfdacaaSRichard Henderson * mmu_watch_or_dirty 1731d50ef446SAnton Johansson * @cpu: generic cpu state 17328cfdacaaSRichard Henderson * @data: lookup parameters 17338cfdacaaSRichard Henderson * @access_type: load/store/code 17348cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 17358cfdacaaSRichard Henderson * 17368cfdacaaSRichard Henderson * Trigger watchpoints for @data.addr:@data.size; 17378cfdacaaSRichard Henderson * record writes to protected clean pages. 17388cfdacaaSRichard Henderson */ 1739d50ef446SAnton Johansson static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data, 17408cfdacaaSRichard Henderson MMUAccessType access_type, uintptr_t ra) 17418cfdacaaSRichard Henderson { 17428cfdacaaSRichard Henderson CPUTLBEntryFull *full = data->full; 1743fb2c53cbSAnton Johansson vaddr addr = data->addr; 17448cfdacaaSRichard Henderson int flags = data->flags; 17458cfdacaaSRichard Henderson int size = data->size; 17468cfdacaaSRichard Henderson 17478cfdacaaSRichard Henderson /* On watchpoint hit, this will longjmp out. */ 17488cfdacaaSRichard Henderson if (flags & TLB_WATCHPOINT) { 17498cfdacaaSRichard Henderson int wp = access_type == MMU_DATA_STORE ? 
BP_MEM_WRITE : BP_MEM_READ; 1750d50ef446SAnton Johansson cpu_check_watchpoint(cpu, addr, size, full->attrs, wp, ra); 17518cfdacaaSRichard Henderson flags &= ~TLB_WATCHPOINT; 17528cfdacaaSRichard Henderson } 17538cfdacaaSRichard Henderson 17548cfdacaaSRichard Henderson /* Note that notdirty is only set for writes. */ 17558cfdacaaSRichard Henderson if (flags & TLB_NOTDIRTY) { 1756d50ef446SAnton Johansson notdirty_write(cpu, addr, size, full, ra); 17578cfdacaaSRichard Henderson flags &= ~TLB_NOTDIRTY; 17588cfdacaaSRichard Henderson } 17598cfdacaaSRichard Henderson data->flags = flags; 17608cfdacaaSRichard Henderson } 17618cfdacaaSRichard Henderson 17628cfdacaaSRichard Henderson /** 17638cfdacaaSRichard Henderson * mmu_lookup: translate page(s) 1764d50ef446SAnton Johansson * @cpu: generic cpu state 17658cfdacaaSRichard Henderson * @addr: virtual address 17668cfdacaaSRichard Henderson * @oi: combined mmu_idx and MemOp 17678cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 17688cfdacaaSRichard Henderson * @access_type: load/store/code 17698cfdacaaSRichard Henderson * @l: output result 17708cfdacaaSRichard Henderson * 17718cfdacaaSRichard Henderson * Resolve the translation for the page(s) beginning at @addr, for MemOp.size 17728cfdacaaSRichard Henderson * bytes. Return true if the lookup crosses a page boundary. 17738cfdacaaSRichard Henderson */ 1774d50ef446SAnton Johansson static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, 17758cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType type, MMULookupLocals *l) 17768cfdacaaSRichard Henderson { 17778cfdacaaSRichard Henderson unsigned a_bits; 17788cfdacaaSRichard Henderson bool crosspage; 17798cfdacaaSRichard Henderson int flags; 17808cfdacaaSRichard Henderson 17818cfdacaaSRichard Henderson l->memop = get_memop(oi); 17828cfdacaaSRichard Henderson l->mmu_idx = get_mmuidx(oi); 17838cfdacaaSRichard Henderson 17848cfdacaaSRichard Henderson tcg_debug_assert(l->mmu_idx < NB_MMU_MODES); 17858cfdacaaSRichard Henderson 17868cfdacaaSRichard Henderson /* Handle CPU specific unaligned behaviour */ 17878cfdacaaSRichard Henderson a_bits = get_alignment_bits(l->memop); 17888cfdacaaSRichard Henderson if (addr & ((1 << a_bits) - 1)) { 1789d50ef446SAnton Johansson cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra); 17908cfdacaaSRichard Henderson } 17918cfdacaaSRichard Henderson 17928cfdacaaSRichard Henderson l->page[0].addr = addr; 17938cfdacaaSRichard Henderson l->page[0].size = memop_size(l->memop); 17948cfdacaaSRichard Henderson l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK; 17958cfdacaaSRichard Henderson l->page[1].size = 0; 17968cfdacaaSRichard Henderson crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK; 17978cfdacaaSRichard Henderson 17988cfdacaaSRichard Henderson if (likely(!crosspage)) { 1799d50ef446SAnton Johansson mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra); 18008cfdacaaSRichard Henderson 18018cfdacaaSRichard Henderson flags = l->page[0].flags; 18028cfdacaaSRichard Henderson if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) { 1803d50ef446SAnton Johansson mmu_watch_or_dirty(cpu, &l->page[0], type, ra); 18048cfdacaaSRichard Henderson } 18058cfdacaaSRichard Henderson if (unlikely(flags & TLB_BSWAP)) { 18068cfdacaaSRichard Henderson l->memop ^= MO_BSWAP; 18078cfdacaaSRichard Henderson } 18088cfdacaaSRichard Henderson } else { 18098cfdacaaSRichard Henderson /* Finish compute of page crossing. 
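       For example, with 4KiB pages a 4-byte access at page offset 0xffe
       yields page[1].addr == addr + 2, so size0 == 2: two bytes come from
       the first page and the remaining two from the second.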
*/ 18108cfdacaaSRichard Henderson int size0 = l->page[1].addr - addr; 18118cfdacaaSRichard Henderson l->page[1].size = l->page[0].size - size0; 18128cfdacaaSRichard Henderson l->page[0].size = size0; 18138cfdacaaSRichard Henderson 18148cfdacaaSRichard Henderson /* 18158cfdacaaSRichard Henderson * Lookup both pages, recognizing exceptions from either. If the 18168cfdacaaSRichard Henderson * second lookup potentially resized, refresh first CPUTLBEntryFull. 18178cfdacaaSRichard Henderson */ 1818d50ef446SAnton Johansson mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra); 1819d50ef446SAnton Johansson if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) { 1820d50ef446SAnton Johansson uintptr_t index = tlb_index(cpu, l->mmu_idx, addr); 1821d50ef446SAnton Johansson l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index]; 18228cfdacaaSRichard Henderson } 18238cfdacaaSRichard Henderson 18248cfdacaaSRichard Henderson flags = l->page[0].flags | l->page[1].flags; 18258cfdacaaSRichard Henderson if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) { 1826d50ef446SAnton Johansson mmu_watch_or_dirty(cpu, &l->page[0], type, ra); 1827d50ef446SAnton Johansson mmu_watch_or_dirty(cpu, &l->page[1], type, ra); 18288cfdacaaSRichard Henderson } 18298cfdacaaSRichard Henderson 18308cfdacaaSRichard Henderson /* 18318cfdacaaSRichard Henderson * Since target/sparc is the only user of TLB_BSWAP, and all 18328cfdacaaSRichard Henderson * Sparc accesses are aligned, any treatment across two pages 18338cfdacaaSRichard Henderson * would be arbitrary. Refuse it until there's a use. 18348cfdacaaSRichard Henderson */ 18358cfdacaaSRichard Henderson tcg_debug_assert((flags & TLB_BSWAP) == 0); 18368cfdacaaSRichard Henderson } 18378cfdacaaSRichard Henderson 183849fa457cSRichard Henderson /* 183949fa457cSRichard Henderson * This alignment check differs from the one above, in that this is 184049fa457cSRichard Henderson * based on the atomicity of the operation. The intended use case is 184149fa457cSRichard Henderson * the ARM memory type field of each PTE, where access to pages with 184249fa457cSRichard Henderson * Device memory type require alignment. 184349fa457cSRichard Henderson */ 184449fa457cSRichard Henderson if (unlikely(flags & TLB_CHECK_ALIGNED)) { 184549fa457cSRichard Henderson MemOp size = l->memop & MO_SIZE; 184649fa457cSRichard Henderson 184749fa457cSRichard Henderson switch (l->memop & MO_ATOM_MASK) { 184849fa457cSRichard Henderson case MO_ATOM_NONE: 184949fa457cSRichard Henderson size = MO_8; 185049fa457cSRichard Henderson break; 185149fa457cSRichard Henderson case MO_ATOM_IFALIGN_PAIR: 185249fa457cSRichard Henderson case MO_ATOM_WITHIN16_PAIR: 185349fa457cSRichard Henderson size = size ? size - 1 : 0; 185449fa457cSRichard Henderson break; 185549fa457cSRichard Henderson default: 185649fa457cSRichard Henderson break; 185749fa457cSRichard Henderson } 185849fa457cSRichard Henderson if (addr & ((1 << size) - 1)) { 185949fa457cSRichard Henderson cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra); 186049fa457cSRichard Henderson } 186149fa457cSRichard Henderson } 186249fa457cSRichard Henderson 18638cfdacaaSRichard Henderson return crosspage; 18648cfdacaaSRichard Henderson } 18658cfdacaaSRichard Henderson 18668cfdacaaSRichard Henderson /* 186708dff435SRichard Henderson * Probe for an atomic operation. Do not allow unaligned operations, 186808dff435SRichard Henderson * or io operations to proceed. Return the host address. 
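 *
 * (Illustrative note: the expected callers are the generated atomic
 * helpers, which pass the access size and perform the read-modify-write
 * on the returned host pointer; when that is not possible this function
 * exits through the stop_the_world path, i.e. cpu_loop_exit_atomic().)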
186908dff435SRichard Henderson */ 1870d560225fSAnton Johansson static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, 1871b0326eb9SAnton Johansson int size, uintptr_t retaddr) 1872d9bb58e5SYang Zhong { 1873b826044fSRichard Henderson uintptr_t mmu_idx = get_mmuidx(oi); 187414776ab5STony Nguyen MemOp mop = get_memop(oi); 1875d9bb58e5SYang Zhong int a_bits = get_alignment_bits(mop); 187608dff435SRichard Henderson uintptr_t index; 187708dff435SRichard Henderson CPUTLBEntry *tlbe; 1878b0326eb9SAnton Johansson vaddr tlb_addr; 187934d49937SPeter Maydell void *hostaddr; 1880417aeaffSRichard Henderson CPUTLBEntryFull *full; 1881d9bb58e5SYang Zhong 1882b826044fSRichard Henderson tcg_debug_assert(mmu_idx < NB_MMU_MODES); 1883b826044fSRichard Henderson 1884d9bb58e5SYang Zhong /* Adjust the given return address. */ 1885d9bb58e5SYang Zhong retaddr -= GETPC_ADJ; 1886d9bb58e5SYang Zhong 1887d9bb58e5SYang Zhong /* Enforce guest required alignment. */ 1888d9bb58e5SYang Zhong if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { 1889d9bb58e5SYang Zhong /* ??? Maybe indicate atomic op to cpu_unaligned_access */ 1890d560225fSAnton Johansson cpu_unaligned_access(cpu, addr, MMU_DATA_STORE, 1891d9bb58e5SYang Zhong mmu_idx, retaddr); 1892d9bb58e5SYang Zhong } 1893d9bb58e5SYang Zhong 1894d9bb58e5SYang Zhong /* Enforce qemu required alignment. */ 189508dff435SRichard Henderson if (unlikely(addr & (size - 1))) { 1896d9bb58e5SYang Zhong /* We get here if guest alignment was not requested, 1897d9bb58e5SYang Zhong or was not enforced by cpu_unaligned_access above. 1898d9bb58e5SYang Zhong We might widen the access and emulate, but for now 1899d9bb58e5SYang Zhong mark an exception and exit the cpu loop. */ 1900d9bb58e5SYang Zhong goto stop_the_world; 1901d9bb58e5SYang Zhong } 1902d9bb58e5SYang Zhong 1903d560225fSAnton Johansson index = tlb_index(cpu, mmu_idx, addr); 1904d560225fSAnton Johansson tlbe = tlb_entry(cpu, mmu_idx, addr); 190508dff435SRichard Henderson 1906d9bb58e5SYang Zhong /* Check TLB entry and enforce page permissions. */ 190708dff435SRichard Henderson tlb_addr = tlb_addr_write(tlbe); 1908334692bcSPeter Maydell if (!tlb_hit(tlb_addr, addr)) { 1909d560225fSAnton Johansson if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE, 19100b3c75adSRichard Henderson addr & TARGET_PAGE_MASK)) { 1911d560225fSAnton Johansson tlb_fill(cpu, addr, size, 191208dff435SRichard Henderson MMU_DATA_STORE, mmu_idx, retaddr); 1913d560225fSAnton Johansson index = tlb_index(cpu, mmu_idx, addr); 1914d560225fSAnton Johansson tlbe = tlb_entry(cpu, mmu_idx, addr); 1915d9bb58e5SYang Zhong } 1916403f290cSEmilio G. Cota tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; 1917d9bb58e5SYang Zhong } 1918d9bb58e5SYang Zhong 1919417aeaffSRichard Henderson /* 1920417aeaffSRichard Henderson * Let the guest notice RMW on a write-only page. 1921417aeaffSRichard Henderson * We have just verified that the page is writable. 1922417aeaffSRichard Henderson * Subpage lookups may have left TLB_INVALID_MASK set, 1923417aeaffSRichard Henderson * but addr_read will only be -1 if PAGE_READ was unset. 
1924417aeaffSRichard Henderson */ 1925417aeaffSRichard Henderson if (unlikely(tlbe->addr_read == -1)) { 1926d560225fSAnton Johansson tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr); 192708dff435SRichard Henderson /* 1928417aeaffSRichard Henderson * Since we don't support reads and writes to different 1929417aeaffSRichard Henderson * addresses, and we do have the proper page loaded for 1930417aeaffSRichard Henderson * write, this shouldn't ever return. But just in case, 1931417aeaffSRichard Henderson * handle via stop-the-world. 193208dff435SRichard Henderson */ 193308dff435SRichard Henderson goto stop_the_world; 193408dff435SRichard Henderson } 1935187ba694SRichard Henderson /* Collect tlb flags for read. */ 1936417aeaffSRichard Henderson tlb_addr |= tlbe->addr_read; 193708dff435SRichard Henderson 193855df6fcfSPeter Maydell /* Notice an IO access or a needs-MMU-lookup access */ 19390953674eSRichard Henderson if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) { 1940d9bb58e5SYang Zhong /* There's really nothing that can be done to 1941d9bb58e5SYang Zhong support this apart from stop-the-world. */ 1942d9bb58e5SYang Zhong goto stop_the_world; 1943d9bb58e5SYang Zhong } 1944d9bb58e5SYang Zhong 194534d49937SPeter Maydell hostaddr = (void *)((uintptr_t)addr + tlbe->addend); 1946d560225fSAnton Johansson full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index]; 194734d49937SPeter Maydell 194834d49937SPeter Maydell if (unlikely(tlb_addr & TLB_NOTDIRTY)) { 1949d560225fSAnton Johansson notdirty_write(cpu, addr, size, full, retaddr); 1950417aeaffSRichard Henderson } 1951417aeaffSRichard Henderson 1952187ba694SRichard Henderson if (unlikely(tlb_addr & TLB_FORCE_SLOW)) { 1953187ba694SRichard Henderson int wp_flags = 0; 1954187ba694SRichard Henderson 1955187ba694SRichard Henderson if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) { 1956187ba694SRichard Henderson wp_flags |= BP_MEM_WRITE; 1957187ba694SRichard Henderson } 1958187ba694SRichard Henderson if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) { 1959187ba694SRichard Henderson wp_flags |= BP_MEM_READ; 1960187ba694SRichard Henderson } 1961187ba694SRichard Henderson if (wp_flags) { 1962d560225fSAnton Johansson cpu_check_watchpoint(cpu, addr, size, 1963187ba694SRichard Henderson full->attrs, wp_flags, retaddr); 1964187ba694SRichard Henderson } 196534d49937SPeter Maydell } 196634d49937SPeter Maydell 196734d49937SPeter Maydell return hostaddr; 1968d9bb58e5SYang Zhong 1969d9bb58e5SYang Zhong stop_the_world: 1970d560225fSAnton Johansson cpu_loop_exit_atomic(cpu, retaddr); 1971d9bb58e5SYang Zhong } 1972d9bb58e5SYang Zhong 1973eed56642SAlex Bennée /* 1974eed56642SAlex Bennée * Load Helpers 1975eed56642SAlex Bennée * 1976eed56642SAlex Bennée * We support two different access types. SOFTMMU_CODE_ACCESS is 1977eed56642SAlex Bennée * specifically for reading instructions from system memory. It is 1978eed56642SAlex Bennée * called by the translation loop and in some helpers where the code 1979eed56642SAlex Bennée * is disassembled. It shouldn't be called directly by guest code. 1980cdfac37bSRichard Henderson * 1981eed56642SAlex Bennée * For the benefit of TCG generated code, we want to avoid the 1982eed56642SAlex Bennée * complication of ABI-specific return type promotion and always 1983eed56642SAlex Bennée * return a value extended to the register size of the host. This is 1984eed56642SAlex Bennée * tcg_target_long, except in the case of a 32-bit host and 64-bit 1985eed56642SAlex Bennée * data, and for that we always have uint64_t. 
1986eed56642SAlex Bennée * 1987eed56642SAlex Bennée * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. 1988eed56642SAlex Bennée */ 1989eed56642SAlex Bennée 19908cfdacaaSRichard Henderson /** 19918cfdacaaSRichard Henderson * do_ld_mmio_beN: 1992d50ef446SAnton Johansson * @cpu: generic cpu state 19931966855eSRichard Henderson * @full: page parameters 19948cfdacaaSRichard Henderson * @ret_be: accumulated data 19951966855eSRichard Henderson * @addr: virtual address 19961966855eSRichard Henderson * @size: number of bytes 19978cfdacaaSRichard Henderson * @mmu_idx: virtual address context 19988cfdacaaSRichard Henderson * @ra: return address into tcg generated code, or 0 1999a4a411fbSStefan Hajnoczi * Context: BQL held 20008cfdacaaSRichard Henderson * 20011966855eSRichard Henderson * Load @size bytes from @addr, which is memory-mapped i/o. 20028cfdacaaSRichard Henderson * The bytes are concatenated in big-endian order with @ret_be. 20038cfdacaaSRichard Henderson */ 2004d50ef446SAnton Johansson static uint64_t int_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full, 20051966855eSRichard Henderson uint64_t ret_be, vaddr addr, int size, 20068bf67267SRichard Henderson int mmu_idx, MMUAccessType type, uintptr_t ra, 20078bf67267SRichard Henderson MemoryRegion *mr, hwaddr mr_offset) 20082dd92606SRichard Henderson { 2009190aba80SRichard Henderson do { 201013e61747SRichard Henderson MemOp this_mop; 201113e61747SRichard Henderson unsigned this_size; 201213e61747SRichard Henderson uint64_t val; 201313e61747SRichard Henderson MemTxResult r; 201413e61747SRichard Henderson 2015190aba80SRichard Henderson /* Read aligned pieces up to 8 bytes. */ 201613e61747SRichard Henderson this_mop = ctz32(size | (int)addr | 8); 201713e61747SRichard Henderson this_size = 1 << this_mop; 201813e61747SRichard Henderson this_mop |= MO_BE; 201913e61747SRichard Henderson 20208bf67267SRichard Henderson r = memory_region_dispatch_read(mr, mr_offset, &val, 20218bf67267SRichard Henderson this_mop, full->attrs); 202213e61747SRichard Henderson if (unlikely(r != MEMTX_OK)) { 2023d50ef446SAnton Johansson io_failed(cpu, full, addr, this_size, type, mmu_idx, r, ra); 20248cfdacaaSRichard Henderson } 202513e61747SRichard Henderson if (this_size == 8) { 202613e61747SRichard Henderson return val; 202713e61747SRichard Henderson } 202813e61747SRichard Henderson 202913e61747SRichard Henderson ret_be = (ret_be << (this_size * 8)) | val; 203013e61747SRichard Henderson addr += this_size; 203113e61747SRichard Henderson mr_offset += this_size; 203213e61747SRichard Henderson size -= this_size; 2033190aba80SRichard Henderson } while (size); 203413e61747SRichard Henderson 20358cfdacaaSRichard Henderson return ret_be; 20368cfdacaaSRichard Henderson } 20378cfdacaaSRichard Henderson 2038d50ef446SAnton Johansson static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full, 20398bf67267SRichard Henderson uint64_t ret_be, vaddr addr, int size, 20408bf67267SRichard Henderson int mmu_idx, MMUAccessType type, uintptr_t ra) 20418bf67267SRichard Henderson { 20428bf67267SRichard Henderson MemoryRegionSection *section; 20438bf67267SRichard Henderson MemoryRegion *mr; 20448bf67267SRichard Henderson hwaddr mr_offset; 20458bf67267SRichard Henderson MemTxAttrs attrs; 20468bf67267SRichard Henderson 20478bf67267SRichard Henderson tcg_debug_assert(size > 0 && size <= 8); 20488bf67267SRichard Henderson 20498bf67267SRichard Henderson attrs = full->attrs; 2050d50ef446SAnton Johansson section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); 
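    /*
     * Worked example for int_ld_mmio_beN() above (illustrative): a 6-byte
     * read starting at an address with addr & 7 == 2 is issued as one
     * 2-byte and then one 4-byte device read (ctz32(size | addr | 8)
     * yields MO_16 and then MO_32), accumulated into ret_be in
     * big-endian order.
     */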
20518bf67267SRichard Henderson mr = section->mr; 20528bf67267SRichard Henderson 20536aba908dSJonathan Cameron BQL_LOCK_GUARD(); 20546aba908dSJonathan Cameron return int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx, 20558bf67267SRichard Henderson type, ra, mr, mr_offset); 20568bf67267SRichard Henderson } 20578bf67267SRichard Henderson 2058d50ef446SAnton Johansson static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full, 20598bf67267SRichard Henderson uint64_t ret_be, vaddr addr, int size, 20608bf67267SRichard Henderson int mmu_idx, uintptr_t ra) 20618bf67267SRichard Henderson { 20628bf67267SRichard Henderson MemoryRegionSection *section; 20638bf67267SRichard Henderson MemoryRegion *mr; 20648bf67267SRichard Henderson hwaddr mr_offset; 20658bf67267SRichard Henderson MemTxAttrs attrs; 20668bf67267SRichard Henderson uint64_t a, b; 20678bf67267SRichard Henderson 20688bf67267SRichard Henderson tcg_debug_assert(size > 8 && size <= 16); 20698bf67267SRichard Henderson 20708bf67267SRichard Henderson attrs = full->attrs; 2071d50ef446SAnton Johansson section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); 20728bf67267SRichard Henderson mr = section->mr; 20738bf67267SRichard Henderson 20746aba908dSJonathan Cameron BQL_LOCK_GUARD(); 2075d50ef446SAnton Johansson a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx, 20768bf67267SRichard Henderson MMU_DATA_LOAD, ra, mr, mr_offset); 2077d50ef446SAnton Johansson b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx, 20788bf67267SRichard Henderson MMU_DATA_LOAD, ra, mr, mr_offset + size - 8); 20798bf67267SRichard Henderson return int128_make128(b, a); 20808bf67267SRichard Henderson } 20818bf67267SRichard Henderson 20828cfdacaaSRichard Henderson /** 20838cfdacaaSRichard Henderson * do_ld_bytes_beN 20848cfdacaaSRichard Henderson * @p: translation parameters 20858cfdacaaSRichard Henderson * @ret_be: accumulated data 20868cfdacaaSRichard Henderson * 20878cfdacaaSRichard Henderson * Load @p->size bytes from @p->haddr, which is RAM. 20888cfdacaaSRichard Henderson * The bytes are concatenated in big-endian order with @ret_be. 20898cfdacaaSRichard Henderson */ 20908cfdacaaSRichard Henderson static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be) 20918cfdacaaSRichard Henderson { 20928cfdacaaSRichard Henderson uint8_t *haddr = p->haddr; 20938cfdacaaSRichard Henderson int i, size = p->size; 20948cfdacaaSRichard Henderson 20958cfdacaaSRichard Henderson for (i = 0; i < size; i++) { 20968cfdacaaSRichard Henderson ret_be = (ret_be << 8) | haddr[i]; 20978cfdacaaSRichard Henderson } 20988cfdacaaSRichard Henderson return ret_be; 20998cfdacaaSRichard Henderson } 21008cfdacaaSRichard Henderson 2101cdfac37bSRichard Henderson /** 2102cdfac37bSRichard Henderson * do_ld_parts_beN 2103cdfac37bSRichard Henderson * @p: translation parameters 2104cdfac37bSRichard Henderson * @ret_be: accumulated data 2105cdfac37bSRichard Henderson * 2106cdfac37bSRichard Henderson * As do_ld_bytes_beN, but atomically on each aligned part.
2107cdfac37bSRichard Henderson */ 2108cdfac37bSRichard Henderson static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be) 2109cdfac37bSRichard Henderson { 2110cdfac37bSRichard Henderson void *haddr = p->haddr; 2111cdfac37bSRichard Henderson int size = p->size; 2112cdfac37bSRichard Henderson 2113cdfac37bSRichard Henderson do { 2114cdfac37bSRichard Henderson uint64_t x; 2115cdfac37bSRichard Henderson int n; 2116cdfac37bSRichard Henderson 2117cdfac37bSRichard Henderson /* 2118cdfac37bSRichard Henderson * Find minimum of alignment and size. 2119cdfac37bSRichard Henderson * This is slightly stronger than required by MO_ATOM_SUBALIGN, which 2120cdfac37bSRichard Henderson * would have only checked the low bits of addr|size once at the start, 2121cdfac37bSRichard Henderson * but is just as easy. 2122cdfac37bSRichard Henderson */ 2123cdfac37bSRichard Henderson switch (((uintptr_t)haddr | size) & 7) { 2124cdfac37bSRichard Henderson case 4: 2125cdfac37bSRichard Henderson x = cpu_to_be32(load_atomic4(haddr)); 2126cdfac37bSRichard Henderson ret_be = (ret_be << 32) | x; 2127cdfac37bSRichard Henderson n = 4; 2128cdfac37bSRichard Henderson break; 2129cdfac37bSRichard Henderson case 2: 2130cdfac37bSRichard Henderson case 6: 2131cdfac37bSRichard Henderson x = cpu_to_be16(load_atomic2(haddr)); 2132cdfac37bSRichard Henderson ret_be = (ret_be << 16) | x; 2133cdfac37bSRichard Henderson n = 2; 2134cdfac37bSRichard Henderson break; 2135cdfac37bSRichard Henderson default: 2136cdfac37bSRichard Henderson x = *(uint8_t *)haddr; 2137cdfac37bSRichard Henderson ret_be = (ret_be << 8) | x; 2138cdfac37bSRichard Henderson n = 1; 2139cdfac37bSRichard Henderson break; 2140cdfac37bSRichard Henderson case 0: 2141cdfac37bSRichard Henderson g_assert_not_reached(); 2142cdfac37bSRichard Henderson } 2143cdfac37bSRichard Henderson haddr += n; 2144cdfac37bSRichard Henderson size -= n; 2145cdfac37bSRichard Henderson } while (size != 0); 2146cdfac37bSRichard Henderson return ret_be; 2147cdfac37bSRichard Henderson } 2148cdfac37bSRichard Henderson 2149cdfac37bSRichard Henderson /** 2150cdfac37bSRichard Henderson * do_ld_whole_be4 2151cdfac37bSRichard Henderson * @p: translation parameters 2152cdfac37bSRichard Henderson * @ret_be: accumulated data 2153cdfac37bSRichard Henderson * 2154cdfac37bSRichard Henderson * As do_ld_bytes_beN, but with one atomic load. 2155cdfac37bSRichard Henderson * Four aligned bytes are guaranteed to cover the load. 2156cdfac37bSRichard Henderson */ 2157cdfac37bSRichard Henderson static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be) 2158cdfac37bSRichard Henderson { 2159cdfac37bSRichard Henderson int o = p->addr & 3; 2160cdfac37bSRichard Henderson uint32_t x = load_atomic4(p->haddr - o); 2161cdfac37bSRichard Henderson 2162cdfac37bSRichard Henderson x = cpu_to_be32(x); 2163cdfac37bSRichard Henderson x <<= o * 8; 2164cdfac37bSRichard Henderson x >>= (4 - p->size) * 8; 2165cdfac37bSRichard Henderson return (ret_be << (p->size * 8)) | x; 2166cdfac37bSRichard Henderson } 2167cdfac37bSRichard Henderson 2168cdfac37bSRichard Henderson /** 2169cdfac37bSRichard Henderson * do_ld_whole_be8 2170cdfac37bSRichard Henderson * @p: translation parameters 2171cdfac37bSRichard Henderson * @ret_be: accumulated data 2172cdfac37bSRichard Henderson * 2173cdfac37bSRichard Henderson * As do_ld_bytes_beN, but with one atomic load. 2174cdfac37bSRichard Henderson * Eight aligned bytes are guaranteed to cover the load.
2175cdfac37bSRichard Henderson */ 2176d50ef446SAnton Johansson static uint64_t do_ld_whole_be8(CPUState *cpu, uintptr_t ra, 2177cdfac37bSRichard Henderson MMULookupPageData *p, uint64_t ret_be) 2178cdfac37bSRichard Henderson { 2179cdfac37bSRichard Henderson int o = p->addr & 7; 218073fda56fSAnton Johansson uint64_t x = load_atomic8_or_exit(cpu, ra, p->haddr - o); 2181cdfac37bSRichard Henderson 2182cdfac37bSRichard Henderson x = cpu_to_be64(x); 2183cdfac37bSRichard Henderson x <<= o * 8; 2184cdfac37bSRichard Henderson x >>= (8 - p->size) * 8; 2185cdfac37bSRichard Henderson return (ret_be << (p->size * 8)) | x; 2186cdfac37bSRichard Henderson } 2187cdfac37bSRichard Henderson 218835c653c4SRichard Henderson /** 218935c653c4SRichard Henderson * do_ld_whole_be16 219035c653c4SRichard Henderson * @p: translation parameters 219135c653c4SRichard Henderson * @ret_be: accumulated data 219235c653c4SRichard Henderson * 219335c653c4SRichard Henderson * As do_ld_bytes_beN, but with one atomic load. 219435c653c4SRichard Henderson * 16 aligned bytes are guaranteed to cover the load. 219535c653c4SRichard Henderson */ 2196d50ef446SAnton Johansson static Int128 do_ld_whole_be16(CPUState *cpu, uintptr_t ra, 219735c653c4SRichard Henderson MMULookupPageData *p, uint64_t ret_be) 219835c653c4SRichard Henderson { 219935c653c4SRichard Henderson int o = p->addr & 15; 220073fda56fSAnton Johansson Int128 x, y = load_atomic16_or_exit(cpu, ra, p->haddr - o); 220135c653c4SRichard Henderson int size = p->size; 220235c653c4SRichard Henderson 220335c653c4SRichard Henderson if (!HOST_BIG_ENDIAN) { 220435c653c4SRichard Henderson y = bswap128(y); 220535c653c4SRichard Henderson } 220635c653c4SRichard Henderson y = int128_lshift(y, o * 8); 220735c653c4SRichard Henderson y = int128_urshift(y, (16 - size) * 8); 220835c653c4SRichard Henderson x = int128_make64(ret_be); 220935c653c4SRichard Henderson x = int128_lshift(x, size * 8); 221035c653c4SRichard Henderson return int128_or(x, y); 221135c653c4SRichard Henderson } 221235c653c4SRichard Henderson 22138cfdacaaSRichard Henderson /* 22148cfdacaaSRichard Henderson * Wrapper for the above. 22158cfdacaaSRichard Henderson */ 2216d50ef446SAnton Johansson static uint64_t do_ld_beN(CPUState *cpu, MMULookupPageData *p, 2217cdfac37bSRichard Henderson uint64_t ret_be, int mmu_idx, MMUAccessType type, 2218cdfac37bSRichard Henderson MemOp mop, uintptr_t ra) 22198cfdacaaSRichard Henderson { 2220cdfac37bSRichard Henderson MemOp atom; 2221cdfac37bSRichard Henderson unsigned tmp, half_size; 2222cdfac37bSRichard Henderson 22238cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2224d50ef446SAnton Johansson return do_ld_mmio_beN(cpu, p->full, ret_be, p->addr, p->size, 22251966855eSRichard Henderson mmu_idx, type, ra); 2226cdfac37bSRichard Henderson } 2227cdfac37bSRichard Henderson 2228cdfac37bSRichard Henderson /* 2229cdfac37bSRichard Henderson * It is a given that we cross a page and therefore there is no 2230cdfac37bSRichard Henderson * atomicity for the load as a whole, but subobjects may need attention.
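 * For example, an 8-byte MO_ATOM_IFALIGN_PAIR load that splits 4/4 across the page boundary must still read each 4-byte half atomically, while a 6/2 split leaves neither half aligned, so plain byte loads suffice.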
2231cdfac37bSRichard Henderson */ 2232cdfac37bSRichard Henderson atom = mop & MO_ATOM_MASK; 2233cdfac37bSRichard Henderson switch (atom) { 2234cdfac37bSRichard Henderson case MO_ATOM_SUBALIGN: 2235cdfac37bSRichard Henderson return do_ld_parts_beN(p, ret_be); 2236cdfac37bSRichard Henderson 2237cdfac37bSRichard Henderson case MO_ATOM_IFALIGN_PAIR: 2238cdfac37bSRichard Henderson case MO_ATOM_WITHIN16_PAIR: 2239cdfac37bSRichard Henderson tmp = mop & MO_SIZE; 2240cdfac37bSRichard Henderson tmp = tmp ? tmp - 1 : 0; 2241cdfac37bSRichard Henderson half_size = 1 << tmp; 2242cdfac37bSRichard Henderson if (atom == MO_ATOM_IFALIGN_PAIR 2243cdfac37bSRichard Henderson ? p->size == half_size 2244cdfac37bSRichard Henderson : p->size >= half_size) { 2245cdfac37bSRichard Henderson if (!HAVE_al8_fast && p->size < 4) { 2246cdfac37bSRichard Henderson return do_ld_whole_be4(p, ret_be); 22478cfdacaaSRichard Henderson } else { 2248d50ef446SAnton Johansson return do_ld_whole_be8(cpu, ra, p, ret_be); 2249cdfac37bSRichard Henderson } 2250cdfac37bSRichard Henderson } 2251cdfac37bSRichard Henderson /* fall through */ 2252cdfac37bSRichard Henderson 2253cdfac37bSRichard Henderson case MO_ATOM_IFALIGN: 2254cdfac37bSRichard Henderson case MO_ATOM_WITHIN16: 2255cdfac37bSRichard Henderson case MO_ATOM_NONE: 22568cfdacaaSRichard Henderson return do_ld_bytes_beN(p, ret_be); 2257cdfac37bSRichard Henderson 2258cdfac37bSRichard Henderson default: 2259cdfac37bSRichard Henderson g_assert_not_reached(); 22608cfdacaaSRichard Henderson } 22618cfdacaaSRichard Henderson } 22628cfdacaaSRichard Henderson 226335c653c4SRichard Henderson /* 226435c653c4SRichard Henderson * Wrapper for the above, for 8 < size < 16. 226535c653c4SRichard Henderson */ 2266d50ef446SAnton Johansson static Int128 do_ld16_beN(CPUState *cpu, MMULookupPageData *p, 226735c653c4SRichard Henderson uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra) 226835c653c4SRichard Henderson { 226935c653c4SRichard Henderson int size = p->size; 227035c653c4SRichard Henderson uint64_t b; 227135c653c4SRichard Henderson MemOp atom; 227235c653c4SRichard Henderson 227335c653c4SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2274d50ef446SAnton Johansson return do_ld16_mmio_beN(cpu, p->full, a, p->addr, size, mmu_idx, ra); 227535c653c4SRichard Henderson } 227635c653c4SRichard Henderson 227735c653c4SRichard Henderson /* 227835c653c4SRichard Henderson * It is a given that we cross a page and therefore there is no 227935c653c4SRichard Henderson * atomicity for the load as a whole, but subobjects may need attention. 228035c653c4SRichard Henderson */ 228135c653c4SRichard Henderson atom = mop & MO_ATOM_MASK; 228235c653c4SRichard Henderson switch (atom) { 228335c653c4SRichard Henderson case MO_ATOM_SUBALIGN: 228435c653c4SRichard Henderson p->size = size - 8; 228535c653c4SRichard Henderson a = do_ld_parts_beN(p, a); 228635c653c4SRichard Henderson p->haddr += size - 8; 228735c653c4SRichard Henderson p->size = 8; 228835c653c4SRichard Henderson b = do_ld_parts_beN(p, 0); 228935c653c4SRichard Henderson break; 229035c653c4SRichard Henderson 229135c653c4SRichard Henderson case MO_ATOM_WITHIN16_PAIR: 229235c653c4SRichard Henderson /* Since size > 8, this is the half that must be atomic. 
*/ 2293d50ef446SAnton Johansson return do_ld_whole_be16(cpu, ra, p, a); 229435c653c4SRichard Henderson 229535c653c4SRichard Henderson case MO_ATOM_IFALIGN_PAIR: 229635c653c4SRichard Henderson /* 229735c653c4SRichard Henderson * Since size > 8, both halves are misaligned, 229835c653c4SRichard Henderson * and so neither is atomic. 229935c653c4SRichard Henderson */ 230035c653c4SRichard Henderson case MO_ATOM_IFALIGN: 230135c653c4SRichard Henderson case MO_ATOM_WITHIN16: 230235c653c4SRichard Henderson case MO_ATOM_NONE: 230335c653c4SRichard Henderson p->size = size - 8; 230435c653c4SRichard Henderson a = do_ld_bytes_beN(p, a); 230535c653c4SRichard Henderson b = ldq_be_p(p->haddr + size - 8); 230635c653c4SRichard Henderson break; 230735c653c4SRichard Henderson 230835c653c4SRichard Henderson default: 230935c653c4SRichard Henderson g_assert_not_reached(); 231035c653c4SRichard Henderson } 231135c653c4SRichard Henderson 231235c653c4SRichard Henderson return int128_make128(b, a); 231335c653c4SRichard Henderson } 231435c653c4SRichard Henderson 2315d50ef446SAnton Johansson static uint8_t do_ld_1(CPUState *cpu, MMULookupPageData *p, int mmu_idx, 23168cfdacaaSRichard Henderson MMUAccessType type, uintptr_t ra) 23178cfdacaaSRichard Henderson { 23188cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2319d50ef446SAnton Johansson return do_ld_mmio_beN(cpu, p->full, 0, p->addr, 1, mmu_idx, type, ra); 23208cfdacaaSRichard Henderson } else { 23218cfdacaaSRichard Henderson return *(uint8_t *)p->haddr; 23228cfdacaaSRichard Henderson } 23238cfdacaaSRichard Henderson } 23248cfdacaaSRichard Henderson 2325d50ef446SAnton Johansson static uint16_t do_ld_2(CPUState *cpu, MMULookupPageData *p, int mmu_idx, 23268cfdacaaSRichard Henderson MMUAccessType type, MemOp memop, uintptr_t ra) 23278cfdacaaSRichard Henderson { 2328f7eaf9d7SRichard Henderson uint16_t ret; 23298cfdacaaSRichard Henderson 23308cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2331d50ef446SAnton Johansson ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 2, mmu_idx, type, ra); 2332f7eaf9d7SRichard Henderson if ((memop & MO_BSWAP) == MO_LE) { 2333f7eaf9d7SRichard Henderson ret = bswap16(ret); 23348cfdacaaSRichard Henderson } 2335f7eaf9d7SRichard Henderson } else { 23368cfdacaaSRichard Henderson /* Perform the load host endian, then swap if necessary. */ 233773fda56fSAnton Johansson ret = load_atom_2(cpu, ra, p->haddr, memop); 23388cfdacaaSRichard Henderson if (memop & MO_BSWAP) { 23398cfdacaaSRichard Henderson ret = bswap16(ret); 23408cfdacaaSRichard Henderson } 2341f7eaf9d7SRichard Henderson } 23428cfdacaaSRichard Henderson return ret; 23438cfdacaaSRichard Henderson } 23448cfdacaaSRichard Henderson 2345d50ef446SAnton Johansson static uint32_t do_ld_4(CPUState *cpu, MMULookupPageData *p, int mmu_idx, 23468cfdacaaSRichard Henderson MMUAccessType type, MemOp memop, uintptr_t ra) 23478cfdacaaSRichard Henderson { 23488cfdacaaSRichard Henderson uint32_t ret; 23498cfdacaaSRichard Henderson 23508cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2351d50ef446SAnton Johansson ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 4, mmu_idx, type, ra); 2352f7eaf9d7SRichard Henderson if ((memop & MO_BSWAP) == MO_LE) { 2353f7eaf9d7SRichard Henderson ret = bswap32(ret); 23548cfdacaaSRichard Henderson } 2355f7eaf9d7SRichard Henderson } else { 23568cfdacaaSRichard Henderson /* Perform the load host endian. 
*/ 235773fda56fSAnton Johansson ret = load_atom_4(cpu, ra, p->haddr, memop); 23588cfdacaaSRichard Henderson if (memop & MO_BSWAP) { 23598cfdacaaSRichard Henderson ret = bswap32(ret); 23608cfdacaaSRichard Henderson } 2361f7eaf9d7SRichard Henderson } 23628cfdacaaSRichard Henderson return ret; 23638cfdacaaSRichard Henderson } 23648cfdacaaSRichard Henderson 2365d50ef446SAnton Johansson static uint64_t do_ld_8(CPUState *cpu, MMULookupPageData *p, int mmu_idx, 23668cfdacaaSRichard Henderson MMUAccessType type, MemOp memop, uintptr_t ra) 23678cfdacaaSRichard Henderson { 23688cfdacaaSRichard Henderson uint64_t ret; 23698cfdacaaSRichard Henderson 23708cfdacaaSRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2371d50ef446SAnton Johansson ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 8, mmu_idx, type, ra); 2372f7eaf9d7SRichard Henderson if ((memop & MO_BSWAP) == MO_LE) { 2373f7eaf9d7SRichard Henderson ret = bswap64(ret); 23748cfdacaaSRichard Henderson } 2375f7eaf9d7SRichard Henderson } else { 23768cfdacaaSRichard Henderson /* Perform the load host endian. */ 237773fda56fSAnton Johansson ret = load_atom_8(cpu, ra, p->haddr, memop); 23788cfdacaaSRichard Henderson if (memop & MO_BSWAP) { 23798cfdacaaSRichard Henderson ret = bswap64(ret); 23808cfdacaaSRichard Henderson } 2381f7eaf9d7SRichard Henderson } 23828cfdacaaSRichard Henderson return ret; 23838cfdacaaSRichard Henderson } 23848cfdacaaSRichard Henderson 2385d50ef446SAnton Johansson static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, 23868cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 23878cfdacaaSRichard Henderson { 23888cfdacaaSRichard Henderson MMULookupLocals l; 23898cfdacaaSRichard Henderson bool crosspage; 23908cfdacaaSRichard Henderson 2391f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 2392d50ef446SAnton Johansson crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); 23938cfdacaaSRichard Henderson tcg_debug_assert(!crosspage); 23948cfdacaaSRichard Henderson 2395d50ef446SAnton Johansson return do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra); 23962dd92606SRichard Henderson } 23972dd92606SRichard Henderson 2398d50ef446SAnton Johansson static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, 23998cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 24002dd92606SRichard Henderson { 24018cfdacaaSRichard Henderson MMULookupLocals l; 24028cfdacaaSRichard Henderson bool crosspage; 24038cfdacaaSRichard Henderson uint16_t ret; 24048cfdacaaSRichard Henderson uint8_t a, b; 24058cfdacaaSRichard Henderson 2406f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 2407d50ef446SAnton Johansson crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); 24088cfdacaaSRichard Henderson if (likely(!crosspage)) { 2409d50ef446SAnton Johansson return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra); 24108cfdacaaSRichard Henderson } 24118cfdacaaSRichard Henderson 2412d50ef446SAnton Johansson a = do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra); 2413d50ef446SAnton Johansson b = do_ld_1(cpu, &l.page[1], l.mmu_idx, access_type, ra); 24148cfdacaaSRichard Henderson 24158cfdacaaSRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 24168cfdacaaSRichard Henderson ret = a | (b << 8); 24178cfdacaaSRichard Henderson } else { 24188cfdacaaSRichard Henderson ret = b | (a << 8); 24198cfdacaaSRichard Henderson } 24208cfdacaaSRichard Henderson return ret; 2421eed56642SAlex Bennée } 2422eed56642SAlex Bennée 2423d50ef446SAnton Johansson static uint32_t 
do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, 24248cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 24252dd92606SRichard Henderson { 24268cfdacaaSRichard Henderson MMULookupLocals l; 24278cfdacaaSRichard Henderson bool crosspage; 24288cfdacaaSRichard Henderson uint32_t ret; 24298cfdacaaSRichard Henderson 2430f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 2431d50ef446SAnton Johansson crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); 24328cfdacaaSRichard Henderson if (likely(!crosspage)) { 2433d50ef446SAnton Johansson return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra); 24348cfdacaaSRichard Henderson } 24358cfdacaaSRichard Henderson 2436d50ef446SAnton Johansson ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra); 2437d50ef446SAnton Johansson ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra); 24388cfdacaaSRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 24398cfdacaaSRichard Henderson ret = bswap32(ret); 24408cfdacaaSRichard Henderson } 24418cfdacaaSRichard Henderson return ret; 2442eed56642SAlex Bennée } 2443eed56642SAlex Bennée 2444d50ef446SAnton Johansson static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi, 24458cfdacaaSRichard Henderson uintptr_t ra, MMUAccessType access_type) 24468cfdacaaSRichard Henderson { 24478cfdacaaSRichard Henderson MMULookupLocals l; 24488cfdacaaSRichard Henderson bool crosspage; 24498cfdacaaSRichard Henderson uint64_t ret; 24508cfdacaaSRichard Henderson 2451f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 2452d50ef446SAnton Johansson crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l); 24538cfdacaaSRichard Henderson if (likely(!crosspage)) { 2454d50ef446SAnton Johansson return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra); 24558cfdacaaSRichard Henderson } 24568cfdacaaSRichard Henderson 2457d50ef446SAnton Johansson ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra); 2458d50ef446SAnton Johansson ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra); 24598cfdacaaSRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 24608cfdacaaSRichard Henderson ret = bswap64(ret); 24618cfdacaaSRichard Henderson } 24628cfdacaaSRichard Henderson return ret; 2463eed56642SAlex Bennée } 2464eed56642SAlex Bennée 2465d50ef446SAnton Johansson static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr, 246635c653c4SRichard Henderson MemOpIdx oi, uintptr_t ra) 246735c653c4SRichard Henderson { 246835c653c4SRichard Henderson MMULookupLocals l; 246935c653c4SRichard Henderson bool crosspage; 247035c653c4SRichard Henderson uint64_t a, b; 247135c653c4SRichard Henderson Int128 ret; 247235c653c4SRichard Henderson int first; 247335c653c4SRichard Henderson 2474f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); 2475d50ef446SAnton Johansson crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l); 247635c653c4SRichard Henderson if (likely(!crosspage)) { 247735c653c4SRichard Henderson if (unlikely(l.page[0].flags & TLB_MMIO)) { 2478d50ef446SAnton Johansson ret = do_ld16_mmio_beN(cpu, l.page[0].full, 0, addr, 16, 24798bf67267SRichard Henderson l.mmu_idx, ra); 2480f7eaf9d7SRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 2481f7eaf9d7SRichard Henderson ret = bswap128(ret); 248235c653c4SRichard Henderson } 2483f7eaf9d7SRichard Henderson } else { 2484f7eaf9d7SRichard Henderson /* Perform the load host endian. 
*/ 248573fda56fSAnton Johansson ret = load_atom_16(cpu, ra, l.page[0].haddr, l.memop); 248635c653c4SRichard Henderson if (l.memop & MO_BSWAP) { 248735c653c4SRichard Henderson ret = bswap128(ret); 248835c653c4SRichard Henderson } 2489f7eaf9d7SRichard Henderson } 249035c653c4SRichard Henderson return ret; 249135c653c4SRichard Henderson } 249235c653c4SRichard Henderson 249335c653c4SRichard Henderson first = l.page[0].size; 249435c653c4SRichard Henderson if (first == 8) { 249535c653c4SRichard Henderson MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64; 249635c653c4SRichard Henderson 2497d50ef446SAnton Johansson a = do_ld_8(cpu, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra); 2498d50ef446SAnton Johansson b = do_ld_8(cpu, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra); 249935c653c4SRichard Henderson if ((mop8 & MO_BSWAP) == MO_LE) { 250035c653c4SRichard Henderson ret = int128_make128(a, b); 250135c653c4SRichard Henderson } else { 250235c653c4SRichard Henderson ret = int128_make128(b, a); 250335c653c4SRichard Henderson } 250435c653c4SRichard Henderson return ret; 250535c653c4SRichard Henderson } 250635c653c4SRichard Henderson 250735c653c4SRichard Henderson if (first < 8) { 2508d50ef446SAnton Johansson a = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, 250935c653c4SRichard Henderson MMU_DATA_LOAD, l.memop, ra); 2510d50ef446SAnton Johansson ret = do_ld16_beN(cpu, &l.page[1], a, l.mmu_idx, l.memop, ra); 251135c653c4SRichard Henderson } else { 2512d50ef446SAnton Johansson ret = do_ld16_beN(cpu, &l.page[0], 0, l.mmu_idx, l.memop, ra); 251335c653c4SRichard Henderson b = int128_getlo(ret); 251435c653c4SRichard Henderson ret = int128_lshift(ret, l.page[1].size * 8); 251535c653c4SRichard Henderson a = int128_gethi(ret); 2516d50ef446SAnton Johansson b = do_ld_beN(cpu, &l.page[1], b, l.mmu_idx, 251735c653c4SRichard Henderson MMU_DATA_LOAD, l.memop, ra); 251835c653c4SRichard Henderson ret = int128_make128(b, a); 251935c653c4SRichard Henderson } 252035c653c4SRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 252135c653c4SRichard Henderson ret = bswap128(ret); 252235c653c4SRichard Henderson } 252335c653c4SRichard Henderson return ret; 252435c653c4SRichard Henderson } 252535c653c4SRichard Henderson 2526d03f1408SRichard Henderson /* 2527eed56642SAlex Bennée * Store Helpers 2528eed56642SAlex Bennée */ 2529eed56642SAlex Bennée 253059213461SRichard Henderson /** 253159213461SRichard Henderson * do_st_mmio_leN: 2532d50ef446SAnton Johansson * @cpu: generic cpu state 25331966855eSRichard Henderson * @full: page parameters 253459213461SRichard Henderson * @val_le: data to store 25351966855eSRichard Henderson * @addr: virtual address 25361966855eSRichard Henderson * @size: number of bytes 253759213461SRichard Henderson * @mmu_idx: virtual address context 253859213461SRichard Henderson * @ra: return address into tcg generated code, or 0 2539a4a411fbSStefan Hajnoczi * Context: BQL held 254059213461SRichard Henderson * 25411966855eSRichard Henderson * Store @size bytes at @addr, which is memory-mapped i/o. 254259213461SRichard Henderson * The bytes to store are extracted in little-endian order from @val_le; 254359213461SRichard Henderson * return the bytes of @val_le beyond @p->size that have not been stored. 
254459213461SRichard Henderson */ 2545d50ef446SAnton Johansson static uint64_t int_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full, 25461966855eSRichard Henderson uint64_t val_le, vaddr addr, int size, 25471f9823ceSRichard Henderson int mmu_idx, uintptr_t ra, 25481f9823ceSRichard Henderson MemoryRegion *mr, hwaddr mr_offset) 25496b8b622eSRichard Henderson { 2550190aba80SRichard Henderson do { 25515646d6a7SRichard Henderson MemOp this_mop; 25525646d6a7SRichard Henderson unsigned this_size; 25535646d6a7SRichard Henderson MemTxResult r; 25545646d6a7SRichard Henderson 2555190aba80SRichard Henderson /* Store aligned pieces up to 8 bytes. */ 25565646d6a7SRichard Henderson this_mop = ctz32(size | (int)addr | 8); 25575646d6a7SRichard Henderson this_size = 1 << this_mop; 25585646d6a7SRichard Henderson this_mop |= MO_LE; 25595646d6a7SRichard Henderson 25605646d6a7SRichard Henderson r = memory_region_dispatch_write(mr, mr_offset, val_le, 25611f9823ceSRichard Henderson this_mop, full->attrs); 25625646d6a7SRichard Henderson if (unlikely(r != MEMTX_OK)) { 2563d50ef446SAnton Johansson io_failed(cpu, full, addr, this_size, MMU_DATA_STORE, 25645646d6a7SRichard Henderson mmu_idx, r, ra); 256559213461SRichard Henderson } 25665646d6a7SRichard Henderson if (this_size == 8) { 25675646d6a7SRichard Henderson return 0; 25685646d6a7SRichard Henderson } 25695646d6a7SRichard Henderson 25705646d6a7SRichard Henderson val_le >>= this_size * 8; 25715646d6a7SRichard Henderson addr += this_size; 25725646d6a7SRichard Henderson mr_offset += this_size; 25735646d6a7SRichard Henderson size -= this_size; 2574190aba80SRichard Henderson } while (size); 2575190aba80SRichard Henderson 257659213461SRichard Henderson return val_le; 257759213461SRichard Henderson } 257859213461SRichard Henderson 2579d50ef446SAnton Johansson static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full, 25801f9823ceSRichard Henderson uint64_t val_le, vaddr addr, int size, 25811f9823ceSRichard Henderson int mmu_idx, uintptr_t ra) 25821f9823ceSRichard Henderson { 25831f9823ceSRichard Henderson MemoryRegionSection *section; 25841f9823ceSRichard Henderson hwaddr mr_offset; 25851f9823ceSRichard Henderson MemoryRegion *mr; 25861f9823ceSRichard Henderson MemTxAttrs attrs; 25871f9823ceSRichard Henderson 25881f9823ceSRichard Henderson tcg_debug_assert(size > 0 && size <= 8); 25891f9823ceSRichard Henderson 25901f9823ceSRichard Henderson attrs = full->attrs; 2591d50ef446SAnton Johansson section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); 25921f9823ceSRichard Henderson mr = section->mr; 25931f9823ceSRichard Henderson 25946aba908dSJonathan Cameron BQL_LOCK_GUARD(); 25956aba908dSJonathan Cameron return int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx, 25961f9823ceSRichard Henderson ra, mr, mr_offset); 25971f9823ceSRichard Henderson } 25981f9823ceSRichard Henderson 2599d50ef446SAnton Johansson static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full, 26001f9823ceSRichard Henderson Int128 val_le, vaddr addr, int size, 26011f9823ceSRichard Henderson int mmu_idx, uintptr_t ra) 26021f9823ceSRichard Henderson { 26031f9823ceSRichard Henderson MemoryRegionSection *section; 26041f9823ceSRichard Henderson MemoryRegion *mr; 26051f9823ceSRichard Henderson hwaddr mr_offset; 26061f9823ceSRichard Henderson MemTxAttrs attrs; 26071f9823ceSRichard Henderson 26081f9823ceSRichard Henderson tcg_debug_assert(size > 8 && size <= 16); 26091f9823ceSRichard Henderson 26101f9823ceSRichard Henderson attrs = full->attrs; 2611d50ef446SAnton 
Johansson section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); 26121f9823ceSRichard Henderson mr = section->mr; 26131f9823ceSRichard Henderson 26146aba908dSJonathan Cameron BQL_LOCK_GUARD(); 2615d50ef446SAnton Johansson int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8, 26161f9823ceSRichard Henderson mmu_idx, ra, mr, mr_offset); 26176aba908dSJonathan Cameron return int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8, 26181f9823ceSRichard Henderson size - 8, mmu_idx, ra, mr, mr_offset + 8); 26191f9823ceSRichard Henderson } 26201f9823ceSRichard Henderson 26216b8b622eSRichard Henderson /* 262259213461SRichard Henderson * Wrapper for the above. 26236b8b622eSRichard Henderson */ 2624d50ef446SAnton Johansson static uint64_t do_st_leN(CPUState *cpu, MMULookupPageData *p, 26255b36f268SRichard Henderson uint64_t val_le, int mmu_idx, 26265b36f268SRichard Henderson MemOp mop, uintptr_t ra) 262759213461SRichard Henderson { 26285b36f268SRichard Henderson MemOp atom; 26295b36f268SRichard Henderson unsigned tmp, half_size; 26305b36f268SRichard Henderson 263159213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2632d50ef446SAnton Johansson return do_st_mmio_leN(cpu, p->full, val_le, p->addr, 26331966855eSRichard Henderson p->size, mmu_idx, ra); 263459213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 263559213461SRichard Henderson return val_le >> (p->size * 8); 26365b36f268SRichard Henderson } 26375b36f268SRichard Henderson 26385b36f268SRichard Henderson /* 26395b36f268SRichard Henderson * It is a given that we cross a page and therefore there is no atomicity 26405b36f268SRichard Henderson * for the store as a whole, but subobjects may need attention. 26415b36f268SRichard Henderson */ 26425b36f268SRichard Henderson atom = mop & MO_ATOM_MASK; 26435b36f268SRichard Henderson switch (atom) { 26445b36f268SRichard Henderson case MO_ATOM_SUBALIGN: 26455b36f268SRichard Henderson return store_parts_leN(p->haddr, p->size, val_le); 26465b36f268SRichard Henderson 26475b36f268SRichard Henderson case MO_ATOM_IFALIGN_PAIR: 26485b36f268SRichard Henderson case MO_ATOM_WITHIN16_PAIR: 26495b36f268SRichard Henderson tmp = mop & MO_SIZE; 26505b36f268SRichard Henderson tmp = tmp ? tmp - 1 : 0; 26515b36f268SRichard Henderson half_size = 1 << tmp; 26525b36f268SRichard Henderson if (atom == MO_ATOM_IFALIGN_PAIR 26535b36f268SRichard Henderson ? 
p->size == half_size 26545b36f268SRichard Henderson : p->size >= half_size) { 26555b36f268SRichard Henderson if (!HAVE_al8_fast && p->size <= 4) { 26565b36f268SRichard Henderson return store_whole_le4(p->haddr, p->size, val_le); 26575b36f268SRichard Henderson } else if (HAVE_al8) { 26585b36f268SRichard Henderson return store_whole_le8(p->haddr, p->size, val_le); 26596b8b622eSRichard Henderson } else { 2660d50ef446SAnton Johansson cpu_loop_exit_atomic(cpu, ra); 26615b36f268SRichard Henderson } 26625b36f268SRichard Henderson } 26635b36f268SRichard Henderson /* fall through */ 26645b36f268SRichard Henderson 26655b36f268SRichard Henderson case MO_ATOM_IFALIGN: 26665b36f268SRichard Henderson case MO_ATOM_WITHIN16: 26675b36f268SRichard Henderson case MO_ATOM_NONE: 26685b36f268SRichard Henderson return store_bytes_leN(p->haddr, p->size, val_le); 26695b36f268SRichard Henderson 26705b36f268SRichard Henderson default: 26715b36f268SRichard Henderson g_assert_not_reached(); 26726b8b622eSRichard Henderson } 26736b8b622eSRichard Henderson } 26746b8b622eSRichard Henderson 267535c653c4SRichard Henderson /* 267635c653c4SRichard Henderson * Wrapper for the above, for 8 < size < 16. 267735c653c4SRichard Henderson */ 2678d50ef446SAnton Johansson static uint64_t do_st16_leN(CPUState *cpu, MMULookupPageData *p, 267935c653c4SRichard Henderson Int128 val_le, int mmu_idx, 268035c653c4SRichard Henderson MemOp mop, uintptr_t ra) 268135c653c4SRichard Henderson { 268235c653c4SRichard Henderson int size = p->size; 268335c653c4SRichard Henderson MemOp atom; 268435c653c4SRichard Henderson 268535c653c4SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2686d50ef446SAnton Johansson return do_st16_mmio_leN(cpu, p->full, val_le, p->addr, 26871f9823ceSRichard Henderson size, mmu_idx, ra); 268835c653c4SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 268935c653c4SRichard Henderson return int128_gethi(val_le) >> ((size - 8) * 8); 269035c653c4SRichard Henderson } 269135c653c4SRichard Henderson 269235c653c4SRichard Henderson /* 269335c653c4SRichard Henderson * It is a given that we cross a page and therefore there is no atomicity 269435c653c4SRichard Henderson * for the store as a whole, but subobjects may need attention. 269535c653c4SRichard Henderson */ 269635c653c4SRichard Henderson atom = mop & MO_ATOM_MASK; 269735c653c4SRichard Henderson switch (atom) { 269835c653c4SRichard Henderson case MO_ATOM_SUBALIGN: 269935c653c4SRichard Henderson store_parts_leN(p->haddr, 8, int128_getlo(val_le)); 270035c653c4SRichard Henderson return store_parts_leN(p->haddr + 8, p->size - 8, 270135c653c4SRichard Henderson int128_gethi(val_le)); 270235c653c4SRichard Henderson 270335c653c4SRichard Henderson case MO_ATOM_WITHIN16_PAIR: 270435c653c4SRichard Henderson /* Since size > 8, this is the half that must be atomic. */ 27056046f6e9SRichard Henderson if (!HAVE_CMPXCHG128) { 2706d50ef446SAnton Johansson cpu_loop_exit_atomic(cpu, ra); 270735c653c4SRichard Henderson } 270835c653c4SRichard Henderson return store_whole_le16(p->haddr, p->size, val_le); 270935c653c4SRichard Henderson 271035c653c4SRichard Henderson case MO_ATOM_IFALIGN_PAIR: 271135c653c4SRichard Henderson /* 271235c653c4SRichard Henderson * Since size > 8, both halves are misaligned, 271335c653c4SRichard Henderson * and so neither is atomic. 
271435c653c4SRichard Henderson */ 271535c653c4SRichard Henderson case MO_ATOM_IFALIGN: 27162be6a486SRichard Henderson case MO_ATOM_WITHIN16: 271735c653c4SRichard Henderson case MO_ATOM_NONE: 271835c653c4SRichard Henderson stq_le_p(p->haddr, int128_getlo(val_le)); 271935c653c4SRichard Henderson return store_bytes_leN(p->haddr + 8, p->size - 8, 272035c653c4SRichard Henderson int128_gethi(val_le)); 272135c653c4SRichard Henderson 272235c653c4SRichard Henderson default: 272335c653c4SRichard Henderson g_assert_not_reached(); 272435c653c4SRichard Henderson } 272535c653c4SRichard Henderson } 272635c653c4SRichard Henderson 2727d50ef446SAnton Johansson static void do_st_1(CPUState *cpu, MMULookupPageData *p, uint8_t val, 272859213461SRichard Henderson int mmu_idx, uintptr_t ra) 2729eed56642SAlex Bennée { 273059213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2731d50ef446SAnton Johansson do_st_mmio_leN(cpu, p->full, val, p->addr, 1, mmu_idx, ra); 273259213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 273359213461SRichard Henderson /* nothing */ 27345b87b3e6SRichard Henderson } else { 273559213461SRichard Henderson *(uint8_t *)p->haddr = val; 27365b87b3e6SRichard Henderson } 2737eed56642SAlex Bennée } 2738eed56642SAlex Bennée 2739d50ef446SAnton Johansson static void do_st_2(CPUState *cpu, MMULookupPageData *p, uint16_t val, 274059213461SRichard Henderson int mmu_idx, MemOp memop, uintptr_t ra) 2741eed56642SAlex Bennée { 274259213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2743f7eaf9d7SRichard Henderson if ((memop & MO_BSWAP) != MO_LE) { 2744f7eaf9d7SRichard Henderson val = bswap16(val); 2745f7eaf9d7SRichard Henderson } 2746d50ef446SAnton Johansson do_st_mmio_leN(cpu, p->full, val, p->addr, 2, mmu_idx, ra); 274759213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 274859213461SRichard Henderson /* nothing */ 274959213461SRichard Henderson } else { 275059213461SRichard Henderson /* Swap to host endian if necessary, then store. */ 275159213461SRichard Henderson if (memop & MO_BSWAP) { 275259213461SRichard Henderson val = bswap16(val); 275359213461SRichard Henderson } 275473fda56fSAnton Johansson store_atom_2(cpu, ra, p->haddr, memop, val); 275559213461SRichard Henderson } 275659213461SRichard Henderson } 275759213461SRichard Henderson 2758d50ef446SAnton Johansson static void do_st_4(CPUState *cpu, MMULookupPageData *p, uint32_t val, 275959213461SRichard Henderson int mmu_idx, MemOp memop, uintptr_t ra) 276059213461SRichard Henderson { 276159213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2762f7eaf9d7SRichard Henderson if ((memop & MO_BSWAP) != MO_LE) { 2763f7eaf9d7SRichard Henderson val = bswap32(val); 2764f7eaf9d7SRichard Henderson } 2765d50ef446SAnton Johansson do_st_mmio_leN(cpu, p->full, val, p->addr, 4, mmu_idx, ra); 276659213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 276759213461SRichard Henderson /* nothing */ 276859213461SRichard Henderson } else { 276959213461SRichard Henderson /* Swap to host endian if necessary, then store. 
*/ 277059213461SRichard Henderson if (memop & MO_BSWAP) { 277159213461SRichard Henderson val = bswap32(val); 277259213461SRichard Henderson } 277373fda56fSAnton Johansson store_atom_4(cpu, ra, p->haddr, memop, val); 277459213461SRichard Henderson } 277559213461SRichard Henderson } 277659213461SRichard Henderson 2777d50ef446SAnton Johansson static void do_st_8(CPUState *cpu, MMULookupPageData *p, uint64_t val, 277859213461SRichard Henderson int mmu_idx, MemOp memop, uintptr_t ra) 277959213461SRichard Henderson { 278059213461SRichard Henderson if (unlikely(p->flags & TLB_MMIO)) { 2781f7eaf9d7SRichard Henderson if ((memop & MO_BSWAP) != MO_LE) { 2782f7eaf9d7SRichard Henderson val = bswap64(val); 2783f7eaf9d7SRichard Henderson } 2784d50ef446SAnton Johansson do_st_mmio_leN(cpu, p->full, val, p->addr, 8, mmu_idx, ra); 278559213461SRichard Henderson } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) { 278659213461SRichard Henderson /* nothing */ 278759213461SRichard Henderson } else { 278859213461SRichard Henderson /* Swap to host endian if necessary, then store. */ 278959213461SRichard Henderson if (memop & MO_BSWAP) { 279059213461SRichard Henderson val = bswap64(val); 279159213461SRichard Henderson } 279273fda56fSAnton Johansson store_atom_8(cpu, ra, p->haddr, memop, val); 279359213461SRichard Henderson } 2794eed56642SAlex Bennée } 2795eed56642SAlex Bennée 2796e20f73fbSAnton Johansson static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val, 279759213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 2798f83bcecbSRichard Henderson { 279959213461SRichard Henderson MMULookupLocals l; 280059213461SRichard Henderson bool crosspage; 280159213461SRichard Henderson 2802f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 2803e20f73fbSAnton Johansson crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); 280459213461SRichard Henderson tcg_debug_assert(!crosspage); 280559213461SRichard Henderson 2806e20f73fbSAnton Johansson do_st_1(cpu, &l.page[0], val, l.mmu_idx, ra); 2807e20f73fbSAnton Johansson } 2808e20f73fbSAnton Johansson 2809d50ef446SAnton Johansson static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val, 281059213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 2811f83bcecbSRichard Henderson { 281259213461SRichard Henderson MMULookupLocals l; 281359213461SRichard Henderson bool crosspage; 281459213461SRichard Henderson uint8_t a, b; 281559213461SRichard Henderson 2816f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 2817d50ef446SAnton Johansson crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); 281859213461SRichard Henderson if (likely(!crosspage)) { 2819d50ef446SAnton Johansson do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); 282059213461SRichard Henderson return; 282159213461SRichard Henderson } 282259213461SRichard Henderson 282359213461SRichard Henderson if ((l.memop & MO_BSWAP) == MO_LE) { 282459213461SRichard Henderson a = val, b = val >> 8; 282559213461SRichard Henderson } else { 282659213461SRichard Henderson b = val, a = val >> 8; 282759213461SRichard Henderson } 2828d50ef446SAnton Johansson do_st_1(cpu, &l.page[0], a, l.mmu_idx, ra); 2829d50ef446SAnton Johansson do_st_1(cpu, &l.page[1], b, l.mmu_idx, ra); 2830f83bcecbSRichard Henderson } 2831f83bcecbSRichard Henderson 2832d50ef446SAnton Johansson static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val, 283359213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 2834f83bcecbSRichard Henderson { 283559213461SRichard Henderson MMULookupLocals l; 283659213461SRichard 
Henderson bool crosspage; 283759213461SRichard Henderson 2838f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 2839d50ef446SAnton Johansson crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); 284059213461SRichard Henderson if (likely(!crosspage)) { 2841d50ef446SAnton Johansson do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); 284259213461SRichard Henderson return; 284359213461SRichard Henderson } 284459213461SRichard Henderson 284559213461SRichard Henderson /* Swap to little endian for simplicity, then store by bytes. */ 284659213461SRichard Henderson if ((l.memop & MO_BSWAP) != MO_LE) { 284759213461SRichard Henderson val = bswap32(val); 284859213461SRichard Henderson } 2849d50ef446SAnton Johansson val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); 2850d50ef446SAnton Johansson (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra); 2851eed56642SAlex Bennée } 2852eed56642SAlex Bennée 2853d50ef446SAnton Johansson static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val, 285459213461SRichard Henderson MemOpIdx oi, uintptr_t ra) 285559213461SRichard Henderson { 285659213461SRichard Henderson MMULookupLocals l; 285759213461SRichard Henderson bool crosspage; 285859213461SRichard Henderson 2859f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 2860d50ef446SAnton Johansson crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); 286159213461SRichard Henderson if (likely(!crosspage)) { 2862d50ef446SAnton Johansson do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); 286359213461SRichard Henderson return; 286459213461SRichard Henderson } 286559213461SRichard Henderson 286659213461SRichard Henderson /* Swap to little endian for simplicity, then store by bytes. */ 286759213461SRichard Henderson if ((l.memop & MO_BSWAP) != MO_LE) { 286859213461SRichard Henderson val = bswap64(val); 286959213461SRichard Henderson } 2870d50ef446SAnton Johansson val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); 2871d50ef446SAnton Johansson (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra); 2872eed56642SAlex Bennée } 2873eed56642SAlex Bennée 2874d50ef446SAnton Johansson static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val, 287535c653c4SRichard Henderson MemOpIdx oi, uintptr_t ra) 287635c653c4SRichard Henderson { 287735c653c4SRichard Henderson MMULookupLocals l; 287835c653c4SRichard Henderson bool crosspage; 287935c653c4SRichard Henderson uint64_t a, b; 288035c653c4SRichard Henderson int first; 288135c653c4SRichard Henderson 2882f86e8f3dSRichard Henderson cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); 2883d50ef446SAnton Johansson crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l); 288435c653c4SRichard Henderson if (likely(!crosspage)) { 2885f7eaf9d7SRichard Henderson if (unlikely(l.page[0].flags & TLB_MMIO)) { 2886f7eaf9d7SRichard Henderson if ((l.memop & MO_BSWAP) != MO_LE) { 2887f7eaf9d7SRichard Henderson val = bswap128(val); 2888f7eaf9d7SRichard Henderson } 2889d50ef446SAnton Johansson do_st16_mmio_leN(cpu, l.page[0].full, val, addr, 16, l.mmu_idx, ra); 2890f7eaf9d7SRichard Henderson } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) { 2891f7eaf9d7SRichard Henderson /* nothing */ 2892f7eaf9d7SRichard Henderson } else { 289335c653c4SRichard Henderson /* Swap to host endian if necessary, then store. 
*/ 289435c653c4SRichard Henderson if (l.memop & MO_BSWAP) { 289535c653c4SRichard Henderson val = bswap128(val); 289635c653c4SRichard Henderson } 289773fda56fSAnton Johansson store_atom_16(cpu, ra, l.page[0].haddr, l.memop, val); 289835c653c4SRichard Henderson } 289935c653c4SRichard Henderson return; 290035c653c4SRichard Henderson } 290135c653c4SRichard Henderson 290235c653c4SRichard Henderson first = l.page[0].size; 290335c653c4SRichard Henderson if (first == 8) { 290435c653c4SRichard Henderson MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64; 290535c653c4SRichard Henderson 290635c653c4SRichard Henderson if (l.memop & MO_BSWAP) { 290735c653c4SRichard Henderson val = bswap128(val); 290835c653c4SRichard Henderson } 290935c653c4SRichard Henderson if (HOST_BIG_ENDIAN) { 291035c653c4SRichard Henderson b = int128_getlo(val), a = int128_gethi(val); 291135c653c4SRichard Henderson } else { 291235c653c4SRichard Henderson a = int128_getlo(val), b = int128_gethi(val); 291335c653c4SRichard Henderson } 2914d50ef446SAnton Johansson do_st_8(cpu, &l.page[0], a, l.mmu_idx, mop8, ra); 2915d50ef446SAnton Johansson do_st_8(cpu, &l.page[1], b, l.mmu_idx, mop8, ra); 291635c653c4SRichard Henderson return; 291735c653c4SRichard Henderson } 291835c653c4SRichard Henderson 291935c653c4SRichard Henderson if ((l.memop & MO_BSWAP) != MO_LE) { 292035c653c4SRichard Henderson val = bswap128(val); 292135c653c4SRichard Henderson } 292235c653c4SRichard Henderson if (first < 8) { 2923d50ef446SAnton Johansson do_st_leN(cpu, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra); 292435c653c4SRichard Henderson val = int128_urshift(val, first * 8); 2925d50ef446SAnton Johansson do_st16_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra); 292635c653c4SRichard Henderson } else { 2927d50ef446SAnton Johansson b = do_st16_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra); 2928d50ef446SAnton Johansson do_st_leN(cpu, &l.page[1], b, l.mmu_idx, l.memop, ra); 292935c653c4SRichard Henderson } 293035c653c4SRichard Henderson } 293135c653c4SRichard Henderson 2932f83bcecbSRichard Henderson #include "ldst_common.c.inc" 2933cfe04a4bSRichard Henderson 2934be9568b4SRichard Henderson /* 2935be9568b4SRichard Henderson * First set of functions passes in OI and RETADDR. 2936be9568b4SRichard Henderson * This makes them callable from other helpers. 
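 * For example, with DATA_SIZE 4 the templates below emit functions such as cpu_atomic_cmpxchgl_le_mmu() and cpu_atomic_cmpxchgl_be_mmu(), each taking the MemOpIdx and return address as explicit arguments.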
2937be9568b4SRichard Henderson */ 2938d9bb58e5SYang Zhong 2939d9bb58e5SYang Zhong #define ATOMIC_NAME(X) \ 2940be9568b4SRichard Henderson glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu) 2941a754f7f3SRichard Henderson 2942707526adSRichard Henderson #define ATOMIC_MMU_CLEANUP 2943d9bb58e5SYang Zhong 2944139c1837SPaolo Bonzini #include "atomic_common.c.inc" 2945d9bb58e5SYang Zhong 2946d9bb58e5SYang Zhong #define DATA_SIZE 1 2947d9bb58e5SYang Zhong #include "atomic_template.h" 2948d9bb58e5SYang Zhong 2949d9bb58e5SYang Zhong #define DATA_SIZE 2 2950d9bb58e5SYang Zhong #include "atomic_template.h" 2951d9bb58e5SYang Zhong 2952d9bb58e5SYang Zhong #define DATA_SIZE 4 2953d9bb58e5SYang Zhong #include "atomic_template.h" 2954d9bb58e5SYang Zhong 2955d9bb58e5SYang Zhong #ifdef CONFIG_ATOMIC64 2956d9bb58e5SYang Zhong #define DATA_SIZE 8 2957d9bb58e5SYang Zhong #include "atomic_template.h" 2958d9bb58e5SYang Zhong #endif 2959d9bb58e5SYang Zhong 296076f9d6adSRichard Henderson #if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128 2961d9bb58e5SYang Zhong #define DATA_SIZE 16 2962d9bb58e5SYang Zhong #include "atomic_template.h" 2963d9bb58e5SYang Zhong #endif 2964d9bb58e5SYang Zhong 2965d9bb58e5SYang Zhong /* Code access functions. */ 2966d9bb58e5SYang Zhong 2967fc4120a3SRichard Henderson uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr) 2968eed56642SAlex Bennée { 29693b916140SRichard Henderson CPUState *cs = env_cpu(env); 29703b916140SRichard Henderson MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(cs, true)); 29713b916140SRichard Henderson return do_ld1_mmu(cs, addr, oi, 0, MMU_INST_FETCH); 29724cef72d0SAlex Bennée } 29734cef72d0SAlex Bennée 2974fc4120a3SRichard Henderson uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr) 29752dd92606SRichard Henderson { 29763b916140SRichard Henderson CPUState *cs = env_cpu(env); 29773b916140SRichard Henderson MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(cs, true)); 29783b916140SRichard Henderson return do_ld2_mmu(cs, addr, oi, 0, MMU_INST_FETCH); 29792dd92606SRichard Henderson } 29802dd92606SRichard Henderson 2981fc4120a3SRichard Henderson uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr) 29824cef72d0SAlex Bennée { 29833b916140SRichard Henderson CPUState *cs = env_cpu(env); 29843b916140SRichard Henderson MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(cs, true)); 29853b916140SRichard Henderson return do_ld4_mmu(cs, addr, oi, 0, MMU_INST_FETCH); 2986eed56642SAlex Bennée } 2987d9bb58e5SYang Zhong 2988fc4120a3SRichard Henderson uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr) 2989eed56642SAlex Bennée { 29903b916140SRichard Henderson CPUState *cs = env_cpu(env); 29913b916140SRichard Henderson MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(cs, true)); 29923b916140SRichard Henderson return do_ld8_mmu(cs, addr, oi, 0, MMU_INST_FETCH); 2993eed56642SAlex Bennée } 299428990626SRichard Henderson 299528990626SRichard Henderson uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr, 299628990626SRichard Henderson MemOpIdx oi, uintptr_t retaddr) 299728990626SRichard Henderson { 2998d50ef446SAnton Johansson return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH); 299928990626SRichard Henderson } 300028990626SRichard Henderson 300128990626SRichard Henderson uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr, 300228990626SRichard Henderson MemOpIdx oi, uintptr_t retaddr) 300328990626SRichard Henderson { 3004d50ef446SAnton Johansson return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH); 300528990626SRichard Henderson 
} 300628990626SRichard Henderson 300728990626SRichard Henderson uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr, 300828990626SRichard Henderson MemOpIdx oi, uintptr_t retaddr) 300928990626SRichard Henderson { 3010d50ef446SAnton Johansson return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH); 301128990626SRichard Henderson } 301228990626SRichard Henderson 301328990626SRichard Henderson uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr, 301428990626SRichard Henderson MemOpIdx oi, uintptr_t retaddr) 301528990626SRichard Henderson { 3016d50ef446SAnton Johansson return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH); 301728990626SRichard Henderson } 3018
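A minimal usage sketch, not part of cputlb.c: how a target translator might invoke the code-fetch helpers defined above. The function names and surrounding translator context are hypothetical; cpu_ldl_code() and cpu_ldl_code_mmu() are the entry points implemented in this file.

/*
 * Hypothetical translator fragment: fetch one 32-bit instruction at PC.
 * cpu_ldl_code() builds a MemOpIdx from MO_TEUL and the CPU's code MMU
 * index and performs an MMU_INST_FETCH access, as shown above.
 */
static uint32_t example_fetch_insn(CPUArchState *env, abi_ptr pc)
{
    return cpu_ldl_code(env, pc);
}

/* Same fetch, but with a caller-supplied MemOpIdx and return address. */
static uint32_t example_fetch_insn_mmu(CPUArchState *env, abi_ptr pc,
                                       MemOpIdx oi, uintptr_t ra)
{
    return cpu_ldl_code_mmu(env, pc, oi, ra);
}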