/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "exec/page-protection.h"
#include "exec/cpu-common.h"
#include "exec/memory.h"
#include "exec/tswap.h"
#include "hw/core/cpu.h"

/* some important defines:
 *
 * HOST_BIG_ENDIAN : whether the host cpu is big endian and
 * otherwise little endian.
 *
 * TARGET_BIG_ENDIAN : same for the target cpu
 */

#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
#define BSWAP_NEEDED
#endif

/* Target-endianness CPU memory access functions. These fit into the
 * {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h.
 */
#if TARGET_BIG_ENDIAN
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define ldn_p(p, sz) ldn_be_p(p, sz)
#define stn_p(p, sz, v) stn_be_p(p, sz, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define ldn_p(p, sz) ldn_le_p(p, sz)
#define stn_p(p, sz, v) stn_le_p(p, sz, v)
#endif
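
/*
 * Illustrative sketch only (not part of this header's API): because the
 * macros above resolve to the target-endian {ld,st}*_{be,le}_p helpers
 * from bswap.h, a value written with st*_p() round-trips through a byte
 * buffer in guest byte order on any host, e.g.:
 *
 *     uint8_t buf[4];
 *     stl_p(buf, 0x12345678);        // stored in target byte order
 *     uint32_t v = ldl_p(buf);       // v == 0x12345678 on any host
 *
 * When host and target endianness differ (BSWAP_NEEDED), both accesses
 * byte-swap; otherwise they are straight loads and stores.
 */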

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include "user/abitypes.h"

/*
 * If non-zero, the guest virtual address space is a contiguous subset
 * of the host virtual address space, i.e. '-R reserved_va' is in effect
 * either from the command-line or by default.  The value is the last
 * byte of the guest address space e.g. UINT32_MAX.
 *
 * If zero, the host and guest virtual address spaces are intermingled.
 */
extern unsigned long reserved_va;

/*
 * Limit the guest addresses as best we can.
 *
 * When not using -R reserved_va, we cannot really limit the guest
 * to less address space than the host.  For 32-bit guests, this
 * acts as a sanity check that we're not giving the guest an address
 * that it cannot even represent.  For 64-bit guests... the address
 * might not be what the real kernel would give, but it is at least
 * representable in the guest.
 *
 * TODO: Improve address allocation to avoid this problem, and to
 * avoid setting bits at the top of guest addresses that might need
 * to be used for tags.
 */
#define GUEST_ADDR_MAX_                                                 \
    ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
     UINT32_MAX : ~0ul)
#define GUEST_ADDR_MAX    (reserved_va ? : GUEST_ADDR_MAX_)

#else

#include "exec/hwaddr.h"

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

#define SUFFIX       _cached_slow
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val,
                               MEMTXATTRS_UNSPECIFIED, NULL);
}

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"

/* Inline fast path for direct RAM access. */
#define ENDIANNESS
#include "exec/memory_ldst_cached.h.inc"

#define SUFFIX       _cached
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"
#endif

/* page related stuff */

#ifdef TARGET_PAGE_BITS_VARY
# include "exec/page-vary.h"
extern const TargetPageBits target_page;
# ifdef CONFIG_DEBUG_TCG
#  define TARGET_PAGE_BITS   ({ assert(target_page.decided); \
                                target_page.bits; })
#  define TARGET_PAGE_MASK   ({ assert(target_page.decided); \
                                (target_long)target_page.mask; })
# else
#  define TARGET_PAGE_BITS   target_page.bits
#  define TARGET_PAGE_MASK   ((target_long)target_page.mask)
# endif
# define TARGET_PAGE_SIZE    (-(int)TARGET_PAGE_MASK)
#else
# define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
# define TARGET_PAGE_SIZE  (1 << TARGET_PAGE_BITS)
# define TARGET_PAGE_MASK  ((target_long)-1 << TARGET_PAGE_BITS)
#endif

#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
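
/*
 * Worked example (illustrative only, not part of this header): for a
 * target with TARGET_PAGE_BITS == 12 the macros above relate as follows:
 *
 *     TARGET_PAGE_SIZE            == 0x1000
 *     TARGET_PAGE_MASK            == (target_long)-1 << 12
 *     addr & TARGET_PAGE_MASK     // start of the page containing addr
 *     addr & ~TARGET_PAGE_MASK    // offset of addr within its page
 *     TARGET_PAGE_ALIGN(addr)     // addr rounded up to a page boundary
 *
 * The value 12 is only an example; each target (or, with
 * TARGET_PAGE_BITS_VARY, the runtime) chooses TARGET_PAGE_BITS.
 */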

#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);

typedef int (*walk_memory_regions_fn)(void *, target_ulong,
                                      target_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

int page_get_flags(target_ulong address);

/**
 * page_set_flags:
 * @start: first byte of range
 * @last: last byte of range
 * @flags: flags to set
 * Context: holding mmap lock
 *
 * Modify the flags of a page and invalidate the code if necessary.
 * The flag PAGE_WRITE_ORG is positioned automatically depending
 * on PAGE_WRITE.  The mmap_lock should already be held.
 */
void page_set_flags(target_ulong start, target_ulong last, int flags);

void page_reset_target_data(target_ulong start, target_ulong last);

/**
 * page_check_range
 * @start: first byte of range
 * @last: last byte of range
 * @flags: flags required for each page
 *
 * Return true if every page in [@start, @last] has @flags set.
 * Return false if any page is unmapped.  Thus testing flags == 0 is
 * equivalent to testing for flags == PAGE_VALID.
 */
bool page_check_range(target_ulong start, target_ulong last, int flags);

/**
 * page_check_range_empty:
 * @start: first byte of range
 * @last: last byte of range
 * Context: holding mmap lock
 *
 * Return true if the entire range [@start, @last] is unmapped.
 * The memory lock must be held so that the caller can ensure
 * the result stays true until a new mapping can be installed.
 */
bool page_check_range_empty(target_ulong start, target_ulong last);

/**
 * page_find_range_empty
 * @min: first byte of search range
 * @max: last byte of search range
 * @len: size of the hole required
 * @align: alignment of the hole required (power of 2)
 *
 * If there is a range [x, x+@len) within [@min, @max] such that
 * x % @align == 0, then return x.  Otherwise return -1.
 * The memory lock must be held, as the caller will want to ensure
 * the returned range stays empty until a new mapping can be installed.
 */
target_ulong page_find_range_empty(target_ulong min, target_ulong max,
                                   target_ulong len, target_ulong align);

/**
 * page_get_target_data(address)
 * @address: guest virtual address
 *
 * Return TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate
 * with the guest page at @address, allocating it if necessary.  The
 * caller should already have verified that the address is valid.
 *
 * The memory will be freed when the guest page is deallocated,
 * e.g. with the munmap system call.
 */
void *page_get_target_data(target_ulong address)
    __attribute__((returns_nonnull));
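
/*
 * Illustrative sketch only (not part of this header): a user-only mapping
 * path would typically combine the helpers above while holding the mmap
 * lock, roughly:
 *
 *     target_ulong addr = page_find_range_empty(start_hint, GUEST_ADDR_MAX,
 *                                               len, TARGET_PAGE_SIZE);
 *     if (addr != (target_ulong)-1) {
 *         // ... install the host mapping for [addr, addr + len) ...
 *         page_set_flags(addr, addr + len - 1,
 *                        PAGE_VALID | PAGE_READ | PAGE_WRITE);
 *     }
 *
 * start_hint and len are placeholders here; the PAGE_* constants come
 * from "exec/page-protection.h" included above.
 */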
#endif

CPUArchState *cpu_copy(CPUArchState *env);

/* Flags for use in ENV->INTERRUPT_PENDING.

   The numbers assigned here are non-sequential in order to preserve
   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
   the vmstate dump.  */

/* External hardware interrupt pending.  This is typically used for
   interrupts from devices.  */
#define CPU_INTERRUPT_HARD        0x0002

/* Exit the current TB.  This is typically used when some system-level device
   makes some change to the memory mapping.  E.g. the a20 line change.  */
#define CPU_INTERRUPT_EXITTB      0x0004

/* Halt the CPU.  */
#define CPU_INTERRUPT_HALT        0x0020

/* Debug event pending.  */
#define CPU_INTERRUPT_DEBUG       0x0080

/* Reset signal.  */
#define CPU_INTERRUPT_RESET       0x0400

/* Several target-specific external hardware interrupts.  Each target/cpu.h
   should define proper names based on these defines.  */
#define CPU_INTERRUPT_TGT_EXT_0   0x0008
#define CPU_INTERRUPT_TGT_EXT_1   0x0010
#define CPU_INTERRUPT_TGT_EXT_2   0x0040
#define CPU_INTERRUPT_TGT_EXT_3   0x0200
#define CPU_INTERRUPT_TGT_EXT_4   0x1000

/* Several target-specific internal interrupts.  These differ from the
   preceding target-specific interrupts in that they are intended to
   originate from within the cpu itself, typically in response to some
   instruction being executed.  These, therefore, are not masked while
   single-stepping within the debugger.  */
#define CPU_INTERRUPT_TGT_INT_0   0x0100
#define CPU_INTERRUPT_TGT_INT_1   0x0800
#define CPU_INTERRUPT_TGT_INT_2   0x2000

/* First unused bit: 0x4000.  */

/* The set of all bits that should be masked when single-stepping.  */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)

#ifdef CONFIG_USER_ONLY

/*
 * Allow some level of source compatibility with softmmu.  We do not
 * support any of the more exotic features, so only invalid pages may
 * be signaled by probe_access_flags().
 */
#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 2))
#define TLB_WATCHPOINT      0

static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return MMU_USER_IDX;
}
#else

/*
 * Flags stored in the low bits of the TLB virtual address.
 * These are defined so that fast path ram access is all zeros.
 * The flags must all lie between TARGET_PAGE_BITS and the
 * maximum address alignment bit.
 *
 * Use TARGET_PAGE_BITS_MIN so that these bits are constant
 * when TARGET_PAGE_BITS_VARY is in effect.
 *
 * The count, if not the placement, of these bits is known
 * to tcg/tcg-op-ldst.c, check_max_alignment().
 */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY        (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 3))
/* Set if writes to the TLB entry are ignored.  */
#define TLB_DISCARD_WRITE   (1 << (TARGET_PAGE_BITS_MIN - 4))
/* Set if the slow path must be used; more flags in CPUTLBEntryFull.  */
#define TLB_FORCE_SLOW      (1 << (TARGET_PAGE_BITS_MIN - 5))

/*
 * Use this mask to check interception with an alignment mask
 * in a TCG backend.
 */
#define TLB_FLAGS_MASK \
    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
    | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)

/*
 * Flags stored in CPUTLBEntryFull.slow_flags[x].
 * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
 */
/* Set if TLB entry requires byte swap.  */
#define TLB_BSWAP            (1 << 0)
/* Set if TLB entry contains a watchpoint.  */
#define TLB_WATCHPOINT       (1 << 1)
/* Set if TLB entry requires aligned accesses.  */
#define TLB_CHECK_ALIGNED    (1 << 2)

#define TLB_SLOW_FLAGS_MASK  (TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED)

/* The two sets of flags must not overlap. */
QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);

/**
 * tlb_hit_page: return true if page aligned @addr is a hit against the
 * TLB entry @tlb_addr
 *
 * @addr: virtual address to test (must be page aligned)
 * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
 */
static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr)
{
    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

/**
 * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
 *
 * @addr: virtual address to test (need not be page aligned)
 * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
 */
static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
{
    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
}

#endif /* !CONFIG_USER_ONLY */

/* Validate correct placement of CPUArchState. */
#include "cpu.h"
QEMU_BUILD_BUG_ON(offsetof(ArchCPU, parent_obj) != 0);
QEMU_BUILD_BUG_ON(offsetof(ArchCPU, env) != sizeof(CPUState));

#endif /* CPU_ALL_H */