/*
 * common defines for all CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_DEFS_H
#define CPU_DEFS_H

#ifndef NEED_CPU_H
#error cpu.h included from common code
#endif

#include "qemu/host-utils.h"
#include "qemu/thread.h"
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
#include "exec/memattrs.h"
#include "hw/core/cpu.h"

#include "cpu-param.h"

#ifndef TARGET_LONG_BITS
# error TARGET_LONG_BITS must be defined in cpu-param.h
#endif
#ifndef NB_MMU_MODES
# error NB_MMU_MODES must be defined in cpu-param.h
#endif
#ifndef TARGET_PHYS_ADDR_SPACE_BITS
# error TARGET_PHYS_ADDR_SPACE_BITS must be defined in cpu-param.h
#endif
#ifndef TARGET_VIRT_ADDR_SPACE_BITS
# error TARGET_VIRT_ADDR_SPACE_BITS must be defined in cpu-param.h
#endif
#ifndef TARGET_PAGE_BITS
# ifdef TARGET_PAGE_BITS_VARY
#  ifndef TARGET_PAGE_BITS_MIN
#   error TARGET_PAGE_BITS_MIN must be defined in cpu-param.h
#  endif
# else
#  error TARGET_PAGE_BITS must be defined in cpu-param.h
# endif
#endif

#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)

/* target_ulong is the type of a virtual address */
#if TARGET_LONG_SIZE == 4
typedef int32_t target_long;
typedef uint32_t target_ulong;
#define TARGET_FMT_lx "%08x"
#define TARGET_FMT_ld "%d"
#define TARGET_FMT_lu "%u"
#elif TARGET_LONG_SIZE == 8
typedef int64_t target_long;
typedef uint64_t target_ulong;
#define TARGET_FMT_lx "%016" PRIx64
#define TARGET_FMT_ld "%" PRId64
#define TARGET_FMT_lu "%" PRIu64
#else
#error TARGET_LONG_SIZE undefined
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)

/* use a fully associative victim tlb of 8 entries */
#define CPU_VTLB_SIZE 8

#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
#define CPU_TLB_ENTRY_BITS 4
#else
#define CPU_TLB_ENTRY_BITS 5
#endif

#define CPU_TLB_DYN_MIN_BITS 6
#define CPU_TLB_DYN_DEFAULT_BITS 8

# if HOST_LONG_BITS == 32
/* Make sure we do not require a double-word shift for the TLB load */
#  define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
# else /* HOST_LONG_BITS == 64 */
/*
 * Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) ==
 * 2**34 == 16G of address space. This is roughly what one would expect a
 * TLB to cover in a modern (as of 2018) x86_64 CPU. For instance, Intel
 * Skylake's Level-2 STLB has 16 1G entries.
 * Also, make sure we do not size the TLB past the guest's address space.
 */
#  ifdef TARGET_PAGE_BITS_VARY
#   define CPU_TLB_DYN_MAX_BITS \
        MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
#  else
#   define CPU_TLB_DYN_MAX_BITS \
        MIN_CONST(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
#  endif
# endif
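
/*
 * Worked example of the bound above (an illustrative comment only; the
 * concrete values are per-target and come from cpu-param.h): on a 64-bit
 * host, a guest with TARGET_PAGE_BITS == 12 and
 * TARGET_VIRT_ADDR_SPACE_BITS == 47 gets
 *
 *     CPU_TLB_DYN_MAX_BITS = MIN(22, 47 - 12) = 22
 *
 * i.e. at most 2**22 entries, each mapping one 4K page, for the 16G of
 * reach described above.  A guest with a small virtual address space,
 * say TARGET_VIRT_ADDR_SPACE_BITS == 24, would instead be capped at
 * 2**(24 - 12) == 4096 entries, which already map the whole space.
 */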

/* Minimalized TLB entry for use by TCG fast path. */
typedef struct CPUTLBEntry {
    /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
       bit TARGET_PAGE_BITS-1..4  : Nonzero for accesses that should not
                                    go directly to ram.
       bit 3                      : indicates that the entry is invalid
       bit 2..0                   : zero
     */
    union {
        struct {
            target_ulong addr_read;
            target_ulong addr_write;
            target_ulong addr_code;
            /* Addend to virtual address to get host address.  IO accesses
               use the corresponding iotlb value.  */
            uintptr_t addend;
        };
        /* padding to get a power of two size */
        uint8_t dummy[1 << CPU_TLB_ENTRY_BITS];
    };
} CPUTLBEntry;

QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));

#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */

#if !defined(CONFIG_USER_ONLY)
/*
 * The full TLB entry, which is not accessed by generated TCG code,
 * so the layout is not as critical as that of CPUTLBEntry.  This is
 * also why we don't want to combine the two structs.
 */
typedef struct CPUTLBEntryFull {
    /*
     * @xlat_section contains:
     *  - in the lower TARGET_PAGE_BITS, a physical section number
     *  - with the lower TARGET_PAGE_BITS masked off, an offset which
     *    must be added to the virtual address to obtain:
     *     + the ram_addr_t of the target RAM (if the physical section
     *       number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
     *     + the offset within the target MemoryRegion (otherwise)
     */
    hwaddr xlat_section;

    /*
     * @phys_addr contains the physical address in the address space
     * given by cpu_asidx_from_attrs(cpu, @attrs).
     */
    hwaddr phys_addr;

    /* @attrs contains the memory transaction attributes for the page. */
    MemTxAttrs attrs;

    /* @prot contains the complete protections for the page. */
    uint8_t prot;

    /* @lg_page_size contains the log2 of the page size. */
    uint8_t lg_page_size;

    /*
     * Allow target-specific additions to this structure.
     * This may be used to cache items from the guest cpu
     * page tables for later use by the implementation.
     */
#ifdef TARGET_PAGE_ENTRY_EXTRA
    TARGET_PAGE_ENTRY_EXTRA
#endif
} CPUTLBEntryFull;
#endif /* !CONFIG_USER_ONLY */

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/*
 * Data elements that are per MMU mode, minus the bits accessed by
 * the TCG fast path.
 */
typedef struct CPUTLBDesc {
    /*
     * Describe a region covering all of the large pages allocated
     * into the tlb.  When any page within this region is flushed,
     * we must flush the entire tlb.  The region is matched if
     * (addr & large_page_mask) == large_page_addr.
     */
    target_ulong large_page_addr;
    target_ulong large_page_mask;
    /* host time (in ns) at the beginning of the time window */
    int64_t window_begin_ns;
    /* maximum number of entries observed in the window */
    size_t window_max_entries;
    size_t n_used_entries;
    /* The next index to use in the tlb victim table.  */
    size_t vindex;
    /* The tlb victim table, in two parts.  */
    CPUTLBEntry vtable[CPU_VTLB_SIZE];
    CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
    CPUTLBEntryFull *fulltlb;
} CPUTLBDesc;

/*
 * Data elements that are per MMU mode, accessed by the fast path.
 * The structure is aligned to aid loading the pair with one insn.
 */
typedef struct CPUTLBDescFast {
    /* Contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */
    uintptr_t mask;
    /* The array of tlb entries itself. */
    CPUTLBEntry *table;
} CPUTLBDescFast QEMU_ALIGNED(2 * sizeof(void *));
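
/*
 * Fast-path lookup sketch (an illustrative comment, not the definitive
 * implementation; see tlb_index()/tlb_entry() in accel/tcg and the
 * equivalent code the TCG backends emit inline):
 *
 *     CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
 *     uintptr_t ofs = (addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS))
 *                     & fast->mask;
 *     CPUTLBEntry *entry = (CPUTLBEntry *)((uintptr_t)fast->table + ofs);
 *
 * Because mask is stored pre-shifted by CPU_TLB_ENTRY_BITS, the masked
 * value is already a byte offset into table: the hot path needs only a
 * shift, an AND and an add.  The two-pointer alignment above lets a
 * backend load mask and table as a pair with a single instruction.
 */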

/*
 * Data elements that are shared between all MMU modes.
 */
typedef struct CPUTLBCommon {
    /* Serialize updates to f.table and d.vtable, and others as noted.  */
    QemuSpin lock;
    /*
     * Within dirty, for each bit N, modifications have been made to
     * mmu_idx N since the last time that mmu_idx was flushed.
     * Protected by tlb_c.lock.
     */
    uint16_t dirty;
    /*
     * Statistics.  These are not lock protected, but are read and
     * written atomically.  This allows the monitor to print a snapshot
     * of the stats without interfering with the cpu.
     */
    size_t full_flush_count;
    size_t part_flush_count;
    size_t elide_flush_count;
} CPUTLBCommon;

/*
 * The entire softmmu tlb, for all MMU modes.
 * The meaning of each of the MMU modes is defined in the target code.
 * Since this is placed within CPUNegativeOffsetState, the smallest
 * negative offsets are at the end of the struct.
 */
typedef struct CPUTLB {
    CPUTLBCommon c;
    CPUTLBDesc d[NB_MMU_MODES];
    CPUTLBDescFast f[NB_MMU_MODES];
} CPUTLB;

/* This will be used by TCG backends to compute offsets.  */
#define TLB_MASK_TABLE_OFS(IDX) \
    ((int)offsetof(ArchCPU, neg.tlb.f[IDX]) - (int)offsetof(ArchCPU, env))

#else

typedef struct CPUTLB { } CPUTLB;

#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */

/*
 * This structure must be placed in ArchCPU immediately
 * before CPUArchState, as a field named "neg".
 */
typedef struct CPUNegativeOffsetState {
    CPUTLB tlb;
    IcountDecr icount_decr;
} CPUNegativeOffsetState;
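
/*
 * Layout sketch (illustrative; each target declares its own ArchCPU):
 *
 *     struct ArchCPU {
 *         CPUState parent_obj;
 *         ...
 *         CPUNegativeOffsetState neg;
 *         CPUArchState env;
 *     };
 *
 * With "neg" directly before "env", the TLB and icount_decr sit at
 * small negative offsets from the env pointer that TCG keeps in a
 * fixed host register, which is what TLB_MASK_TABLE_OFS() above
 * relies on.
 */

#endif /* CPU_DEFS_H */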