/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    qemu_spin_init(&env->tlb_c.lock);
}

/* flush_all_helper: run fn across all cpus
 *
 * The caller can turn this into a synchronisation point by additionally
 * queueing its own flush as "safe" work (see the *_all_cpus_synced()
 * variants below), so that all queued work is finished before execution
 * starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

size_t tlb_flush_count(void)
{
    CPUState *cpu;
    size_t count = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        count += atomic_read(&env->tlb_flush_count);
    }
    return count;
}
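
/*
 * A reader's summary of the TLB data structures used throughout this file
 * (the authoritative declarations live with the other TLB fields in
 * CPUArchState):
 *  - env->tlb_table[mmu_idx][]    the main, direct-mapped TLB per MMU index
 *  - env->tlb_v_table[mmu_idx][]  a small victim TLB backing the main table
 *  - env->iotlb / env->iotlb_v    I/O metadata paired with the entries above
 *  - env->tlb_d[mmu_idx]          bookkeeping: large_page_addr/mask, vindex
 * Updates to all of these are serialized by env->tlb_c.lock.
 */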

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
    memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
    memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    env->tlb_d[mmu_idx].large_page_addr = -1;
    env->tlb_d[mmu_idx].large_page_mask = -1;
    env->tlb_d[mmu_idx].vindex = 0;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned long mmu_idx_bitmask = data.host_int;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04lx\n", mmu_idx_bitmask);

    qemu_spin_lock(&env->tlb_c.lock);
    env->tlb_c.pending_flush &= ~mmu_idx_bitmask;

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
            tlb_flush_one_mmuidx_locked(env, mmu_idx);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (mmu_idx_bitmask == ALL_MMUIDX_BITS) {
        atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1);
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        CPUArchState *env = cpu->env_ptr;
        uint16_t pending, to_clean;

        qemu_spin_lock(&env->tlb_c.lock);
        pending = env->tlb_c.pending_flush;
        to_clean = idxmap & ~pending;
        env->tlb_c.pending_flush = pending | idxmap;
        qemu_spin_unlock(&env->tlb_c.lock);

        if (to_clean) {
            tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", to_clean);
            async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                             RUN_ON_CPU_HOST_INT(to_clean));
        }
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}
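
/*
 * Note on the coalescing above: cross-CPU requests are accumulated in
 * tlb_c.pending_flush, and only MMU indexes that were not already pending
 * (to_clean = idxmap & ~pending) get a new piece of async work queued;
 * the async worker clears the bits it has handled.  A request made from
 * the vCPU's own thread bypasses the queue and flushes synchronously.
 */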

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}
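
/*
 * Summary of the three flavours implemented above:
 *  - tlb_flush*()                  flush only the calling/target vCPU
 *  - tlb_flush*_all_cpus()         queue async flushes on the other vCPUs
 *                                  and flush the source vCPU immediately
 *  - tlb_flush*_all_cpus_synced()  additionally queue the source vCPU's
 *                                  flush as "safe" work, creating a
 *                                  synchronisation point before execution
 *                                  resumes
 */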

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    int k;

    assert_cpu_is_self(ENV_GET_CPU(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page);
    }
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env->tlb_d[midx].large_page_addr;
    target_ulong lp_mask = env->tlb_d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx);
    } else {
        tlb_flush_entry_locked(tlb_entry(env, midx, page), page);
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx "\n", addr);

    addr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_page_locked(env, mmu_idx, addr);
    }
    qemu_spin_unlock(&env->tlb_c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    } else {
        tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
    }
}

/* As we are going to hijack the bottom bits of the page address for a
 * mmuidx bit mask we need to fail to build if we can't do that
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
              addr, mmu_idx_bitmap);

    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_flush_page_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    const run_on_cpu_func fn = tlb_flush_page_async_work;

    flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
    fn(src, RUN_ON_CPU_TARGET_PTR(addr));
}

void tlb_flush_page_all_cpus_synced(CPUState *src,
                                    target_ulong addr)
{
    const run_on_cpu_func fn = tlb_flush_page_async_work;

    flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
    async_safe_run_on_cpu(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with atomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}
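
/*
 * Roughly, what the flag above buys us: with TLB_NOTDIRTY set in
 * addr_write, the fast-path tag comparison for stores to this page no
 * longer matches, so generated code falls back to the store helpers
 * (softmmu_template.h), which route the access through the notdirty
 * handling; tlb_set_dirty() below clears the flag again once the page
 * has been marked dirty.
 */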

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_table[mmu_idx][i], start1,
                                         length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_v_table[mmu_idx][i], start1,
                                         length);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env->tlb_d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB. */
        lp_mask &= env->tlb_d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env->tlb_d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env->tlb_d[mmu_idx].large_page_mask = lp_mask;
}
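
/*
 * Worked example of the mask arithmetic above (32-bit target_ulong values
 * chosen for illustration): with an existing 2MB region at lp_addr =
 * 0x40000000 (lp_mask = 0xffe00000) and a new 2MB page at vaddr =
 * 0x40600000, the while loop widens lp_mask to 0xff800000, so the tracked
 * region becomes the 8MB block starting at 0x40000000, which covers both
 * pages.
 */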

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /*
         * Slow-path the TLB entries; we will repeat the MMU check and TLB
         * fill on every access.
         */
        address |= TLB_RECHECK;
    }
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
                                            paddr_page, xlat, prot, &address);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&env->tlb_c.lock);

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page)) {
        unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];
    }

    /* refill the tlb */
    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
     *  + the offset within section->mr of the page base (otherwise)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page;
    env->iotlb[mmu_idx][index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    qemu_spin_unlock(&env->tlb_c.lock);
}
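
/*
 * Arithmetic restatement of the iotlb comment above: iotlb carries the
 * physical section number in its low TARGET_PAGE_BITS and a page-aligned
 * base in the rest, so subtracting the page-aligned vaddr_page leaves the
 * low bits intact.  io_readx()/io_writex() then compute
 * (iotlbentry->addr & TARGET_PAGE_MASK) + addr, which equals
 * page_base + (addr - vaddr_page), i.e. the in-page offset of the access
 * is added back simply by using the full access address.
 */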

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx,
                         target_ulong addr, uintptr_t retaddr,
                         bool recheck, MMUAccessType access_type, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    if (recheck) {
        /*
         * This is a TLB_RECHECK access, where the MMU protection
         * covers a smaller range than a target page, and we must
         * repeat the MMU check here. This tlb_fill() call might
         * longjump out if this access should cause a guest exception.
         */
        CPUTLBEntry *entry;
        target_ulong tlb_addr;

        tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);

        entry = tlb_entry(env, mmu_idx, addr);
        tlb_addr = entry->addr_read;
        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
            /* RAM access */
            uintptr_t haddr = addr + entry->addend;

            return ldn_p((void *)haddr, size);
        }
        /* Fall through for handling IO accesses */
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_access_type = access_type;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset,
                                    &val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, bool recheck, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    if (recheck) {
        /*
         * This is a TLB_RECHECK access, where the MMU protection
         * covers a smaller range than a target page, and we must
         * repeat the MMU check here. This tlb_fill() call might
         * longjump out if this access should cause a guest exception.
         */
        CPUTLBEntry *entry;
        target_ulong tlb_addr;

        tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);

        entry = tlb_entry(env, mmu_idx, addr);
        tlb_addr = tlb_addr_write(entry);
        if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
            /* RAM access */
            uintptr_t haddr = addr + entry->addend;

            stn_p((void *)haddr, size, val);
            return;
        }
        /* Fall through for handling IO accesses */
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset,
                                     val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(ENV_GET_CPU(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use atomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];

            qemu_spin_lock(&env->tlb_c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env->tlb_c.lock);

            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)
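
/*
 * Typical use, as in the helpers below: on a main-table miss, try
 *
 *     if (!VICTIM_TLB_HIT(addr_write, addr)) {
 *         tlb_fill(...);
 *     }
 *
 * so that a page recently evicted by tlb_set_page_with_attrs() can be
 * recovered from the victim table without a full refill.
 */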

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
        /*
         * Return -1 if we can't translate and execute from an entire
         * page of RAM here, which will cause us to execute by loading
         * and translating one insn at a time, without caching:
         *  - TLB_RECHECK: means the MMU protection covers a smaller range
         *    than a target page, so we must redo the MMU check every insn
         *  - TLB_MMIO: region is not backed by RAM
         */
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    if (!tlb_hit(tlb_addr_write(entry), addr)) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr,
                               NotDirtyInfo *ndi)
{
    size_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(tlbe);
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    ndi->active = false;
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        ndi->active = true;
        memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
                                      qemu_ram_addr_from_host_nofail(hostaddr),
                                      1 << s_bits);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"
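
/*
 * Each inclusion of softmmu_template.h above instantiates the load/store
 * helpers for one access size; the template consumes DATA_SIZE (and is
 * expected to undefine it), so the macro is simply redefined before the
 * next include.  The atomic_template.h and _cmmu blocks below follow the
 * same pattern.
 */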

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
#define ATOMIC_MMU_CLEANUP                              \
    do {                                                \
        if (unlikely(ndi.active)) {                     \
            memory_notdirty_write_complete(&ndi);       \
        }                                               \
    } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"