1 /* 2 * MIPS internal definitions and helpers 3 * 4 * This work is licensed under the terms of the GNU GPL, version 2 or later. 5 * See the COPYING file in the top-level directory. 6 */ 7 8 #ifndef MIPS_INTERNAL_H 9 #define MIPS_INTERNAL_H 10 11 #include "exec/memattrs.h" 12 #ifdef CONFIG_TCG 13 #include "tcg/tcg-internal.h" 14 #endif 15 16 /* 17 * MMU types, the first four entries have the same layout as the 18 * CP0C0_MT field. 19 */ 20 enum mips_mmu_types { 21 MMU_TYPE_NONE = 0, 22 MMU_TYPE_R4000 = 1, /* Standard TLB */ 23 MMU_TYPE_BAT = 2, /* Block Address Translation */ 24 MMU_TYPE_FMT = 3, /* Fixed Mapping */ 25 MMU_TYPE_DVF = 4, /* Dual VTLB and FTLB */ 26 MMU_TYPE_R3000, 27 MMU_TYPE_R6000, 28 MMU_TYPE_R8000 29 }; 30 31 struct mips_def_t { 32 const char *name; 33 int32_t CP0_PRid; 34 int32_t CP0_Config0; 35 int32_t CP0_Config1; 36 int32_t CP0_Config2; 37 int32_t CP0_Config3; 38 int32_t CP0_Config4; 39 int32_t CP0_Config4_rw_bitmask; 40 int32_t CP0_Config5; 41 int32_t CP0_Config5_rw_bitmask; 42 int32_t CP0_Config6; 43 int32_t CP0_Config6_rw_bitmask; 44 int32_t CP0_Config7; 45 int32_t CP0_Config7_rw_bitmask; 46 target_ulong CP0_LLAddr_rw_bitmask; 47 int CP0_LLAddr_shift; 48 int32_t SYNCI_Step; 49 int32_t CCRes; 50 int32_t CP0_Status_rw_bitmask; 51 int32_t CP0_TCStatus_rw_bitmask; 52 int32_t CP0_SRSCtl; 53 int32_t CP1_fcr0; 54 int32_t CP1_fcr31_rw_bitmask; 55 int32_t CP1_fcr31; 56 int32_t MSAIR; 57 int32_t SEGBITS; 58 int32_t PABITS; 59 int32_t CP0_SRSConf0_rw_bitmask; 60 int32_t CP0_SRSConf0; 61 int32_t CP0_SRSConf1_rw_bitmask; 62 int32_t CP0_SRSConf1; 63 int32_t CP0_SRSConf2_rw_bitmask; 64 int32_t CP0_SRSConf2; 65 int32_t CP0_SRSConf3_rw_bitmask; 66 int32_t CP0_SRSConf3; 67 int32_t CP0_SRSConf4_rw_bitmask; 68 int32_t CP0_SRSConf4; 69 int32_t CP0_PageGrain_rw_bitmask; 70 int32_t CP0_PageGrain; 71 target_ulong CP0_EBaseWG_rw_bitmask; 72 uint64_t insn_flags; 73 enum mips_mmu_types mmu_type; 74 int32_t SAARP; 75 }; 76 77 extern const char regnames[32][4]; 78 extern const char fregnames[32][4]; 79 80 extern const struct mips_def_t mips_defs[]; 81 extern const int mips_defs_number; 82 83 int mips_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); 84 int mips_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); 85 void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, 86 MMUAccessType access_type, 87 int mmu_idx, uintptr_t retaddr); 88 89 #define USEG_LIMIT ((target_ulong)(int32_t)0x7FFFFFFFUL) 90 #define KSEG0_BASE ((target_ulong)(int32_t)0x80000000UL) 91 #define KSEG1_BASE ((target_ulong)(int32_t)0xA0000000UL) 92 #define KSEG2_BASE ((target_ulong)(int32_t)0xC0000000UL) 93 #define KSEG3_BASE ((target_ulong)(int32_t)0xE0000000UL) 94 95 #define KVM_KSEG0_BASE ((target_ulong)(int32_t)0x40000000UL) 96 #define KVM_KSEG2_BASE ((target_ulong)(int32_t)0x60000000UL) 97 98 #if !defined(CONFIG_USER_ONLY) 99 100 enum { 101 TLBRET_XI = -6, 102 TLBRET_RI = -5, 103 TLBRET_DIRTY = -4, 104 TLBRET_INVALID = -3, 105 TLBRET_NOMATCH = -2, 106 TLBRET_BADADDR = -1, 107 TLBRET_MATCH = 0 108 }; 109 110 int get_physical_address(CPUMIPSState *env, hwaddr *physical, 111 int *prot, target_ulong real_address, 112 MMUAccessType access_type, int mmu_idx); 113 hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); 114 115 typedef struct r4k_tlb_t r4k_tlb_t; 116 struct r4k_tlb_t { 117 target_ulong VPN; 118 uint32_t PageMask; 119 uint16_t ASID; 120 uint32_t MMID; 121 unsigned int G:1; 122 unsigned int C0:3; 123 unsigned int C1:3; 124 unsigned int V0:1; 125 unsigned int V1:1; 126 unsigned int D0:1; 127 unsigned int D1:1; 128 unsigned int XI0:1; 129 unsigned int XI1:1; 130 unsigned int RI0:1; 131 unsigned int RI1:1; 132 unsigned int EHINV:1; 133 uint64_t PFN[2]; 134 }; 135 136 struct CPUMIPSTLBContext { 137 uint32_t nb_tlb; 138 uint32_t tlb_in_use; 139 int (*map_address)(struct CPUMIPSState *env, hwaddr *physical, int *prot, 140 target_ulong address, MMUAccessType access_type); 141 void (*helper_tlbwi)(struct CPUMIPSState *env); 142 void (*helper_tlbwr)(struct CPUMIPSState *env); 143 void (*helper_tlbp)(struct CPUMIPSState *env); 144 void (*helper_tlbr)(struct CPUMIPSState *env); 145 void (*helper_tlbinv)(struct CPUMIPSState *env); 146 void (*helper_tlbinvf)(struct CPUMIPSState *env); 147 union { 148 struct { 149 r4k_tlb_t tlb[MIPS_TLB_MAX]; 150 } r4k; 151 } mmu; 152 }; 153 154 void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, 155 vaddr addr, unsigned size, 156 MMUAccessType access_type, 157 int mmu_idx, MemTxAttrs attrs, 158 MemTxResult response, uintptr_t retaddr); 159 extern const VMStateDescription vmstate_mips_cpu; 160 161 #endif /* !CONFIG_USER_ONLY */ 162 163 #define cpu_signal_handler cpu_mips_signal_handler 164 165 static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env) 166 { 167 return (env->CP0_Status & (1 << CP0St_IE)) && 168 !(env->CP0_Status & (1 << CP0St_EXL)) && 169 !(env->CP0_Status & (1 << CP0St_ERL)) && 170 !(env->hflags & MIPS_HFLAG_DM) && 171 /* 172 * Note that the TCStatus IXMT field is initialized to zero, 173 * and only MT capable cores can set it to one. So we don't 174 * need to check for MT capabilities here. 175 */ 176 !(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_IXMT)); 177 } 178 179 /* Check if there is pending and not masked out interrupt */ 180 static inline bool cpu_mips_hw_interrupts_pending(CPUMIPSState *env) 181 { 182 int32_t pending; 183 int32_t status; 184 bool r; 185 186 pending = env->CP0_Cause & CP0Ca_IP_mask; 187 status = env->CP0_Status & CP0Ca_IP_mask; 188 189 if (env->CP0_Config3 & (1 << CP0C3_VEIC)) { 190 /* 191 * A MIPS configured with a vectorizing external interrupt controller 192 * will feed a vector into the Cause pending lines. The core treats 193 * the status lines as a vector level, not as individual masks. 194 */ 195 r = pending > status; 196 } else { 197 /* 198 * A MIPS configured with compatibility or VInt (Vectored Interrupts) 199 * treats the pending lines as individual interrupt lines, the status 200 * lines are individual masks. 201 */ 202 r = (pending & status) != 0; 203 } 204 return r; 205 } 206 207 void mips_tcg_init(void); 208 209 void msa_reset(CPUMIPSState *env); 210 211 /* cp0_timer.c */ 212 uint32_t cpu_mips_get_count(CPUMIPSState *env); 213 void cpu_mips_store_count(CPUMIPSState *env, uint32_t value); 214 void cpu_mips_store_compare(CPUMIPSState *env, uint32_t value); 215 void cpu_mips_start_count(CPUMIPSState *env); 216 void cpu_mips_stop_count(CPUMIPSState *env); 217 218 static inline void mips_env_set_pc(CPUMIPSState *env, target_ulong value) 219 { 220 env->active_tc.PC = value & ~(target_ulong)1; 221 if (value & 1) { 222 env->hflags |= MIPS_HFLAG_M16; 223 } else { 224 env->hflags &= ~(MIPS_HFLAG_M16); 225 } 226 } 227 228 static inline void restore_pamask(CPUMIPSState *env) 229 { 230 if (env->hflags & MIPS_HFLAG_ELPA) { 231 env->PAMask = (1ULL << env->PABITS) - 1; 232 } else { 233 env->PAMask = PAMASK_BASE; 234 } 235 } 236 237 static inline int mips_vpe_active(CPUMIPSState *env) 238 { 239 int active = 1; 240 241 /* Check that the VPE is enabled. */ 242 if (!(env->mvp->CP0_MVPControl & (1 << CP0MVPCo_EVP))) { 243 active = 0; 244 } 245 /* Check that the VPE is activated. */ 246 if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))) { 247 active = 0; 248 } 249 250 /* 251 * Now verify that there are active thread contexts in the VPE. 252 * 253 * This assumes the CPU model will internally reschedule threads 254 * if the active one goes to sleep. If there are no threads available 255 * the active one will be in a sleeping state, and we can turn off 256 * the entire VPE. 257 */ 258 if (!(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_A))) { 259 /* TC is not activated. */ 260 active = 0; 261 } 262 if (env->active_tc.CP0_TCHalt & 1) { 263 /* TC is in halt state. */ 264 active = 0; 265 } 266 267 return active; 268 } 269 270 static inline int mips_vp_active(CPUMIPSState *env) 271 { 272 CPUState *other_cs = first_cpu; 273 274 /* Check if the VP disabled other VPs (which means the VP is enabled) */ 275 if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) { 276 return 1; 277 } 278 279 /* Check if the virtual processor is disabled due to a DVP */ 280 CPU_FOREACH(other_cs) { 281 MIPSCPU *other_cpu = MIPS_CPU(other_cs); 282 if ((&other_cpu->env != env) && 283 ((other_cpu->env.CP0_VPControl >> CP0VPCtl_DIS) & 1)) { 284 return 0; 285 } 286 } 287 return 1; 288 } 289 290 static inline void compute_hflags(CPUMIPSState *env) 291 { 292 env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 | 293 MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU | 294 MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 | 295 MIPS_HFLAG_DSP_R3 | MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA | 296 MIPS_HFLAG_FRE | MIPS_HFLAG_ELPA | MIPS_HFLAG_ERL); 297 if (env->CP0_Status & (1 << CP0St_ERL)) { 298 env->hflags |= MIPS_HFLAG_ERL; 299 } 300 if (!(env->CP0_Status & (1 << CP0St_EXL)) && 301 !(env->CP0_Status & (1 << CP0St_ERL)) && 302 !(env->hflags & MIPS_HFLAG_DM)) { 303 env->hflags |= (env->CP0_Status >> CP0St_KSU) & 304 MIPS_HFLAG_KSU; 305 } 306 #if defined(TARGET_MIPS64) 307 if ((env->insn_flags & ISA_MIPS3) && 308 (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) || 309 (env->CP0_Status & (1 << CP0St_PX)) || 310 (env->CP0_Status & (1 << CP0St_UX)))) { 311 env->hflags |= MIPS_HFLAG_64; 312 } 313 314 if (!(env->insn_flags & ISA_MIPS3)) { 315 env->hflags |= MIPS_HFLAG_AWRAP; 316 } else if (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) && 317 !(env->CP0_Status & (1 << CP0St_UX))) { 318 env->hflags |= MIPS_HFLAG_AWRAP; 319 } else if (env->insn_flags & ISA_MIPS_R6) { 320 /* Address wrapping for Supervisor and Kernel is specified in R6 */ 321 if ((((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_SM) && 322 !(env->CP0_Status & (1 << CP0St_SX))) || 323 (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_KM) && 324 !(env->CP0_Status & (1 << CP0St_KX)))) { 325 env->hflags |= MIPS_HFLAG_AWRAP; 326 } 327 } 328 #endif 329 if (((env->CP0_Status & (1 << CP0St_CU0)) && 330 !(env->insn_flags & ISA_MIPS_R6)) || 331 !(env->hflags & MIPS_HFLAG_KSU)) { 332 env->hflags |= MIPS_HFLAG_CP0; 333 } 334 if (env->CP0_Status & (1 << CP0St_CU1)) { 335 env->hflags |= MIPS_HFLAG_FPU; 336 } 337 if (env->CP0_Status & (1 << CP0St_FR)) { 338 env->hflags |= MIPS_HFLAG_F64; 339 } 340 if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_KM) && 341 (env->CP0_Config5 & (1 << CP0C5_SBRI))) { 342 env->hflags |= MIPS_HFLAG_SBRI; 343 } 344 if (env->insn_flags & ASE_DSP_R3) { 345 /* 346 * Our cpu supports DSP R3 ASE, so enable 347 * access to DSP R3 resources. 348 */ 349 if (env->CP0_Status & (1 << CP0St_MX)) { 350 env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 | 351 MIPS_HFLAG_DSP_R3; 352 } 353 } else if (env->insn_flags & ASE_DSP_R2) { 354 /* 355 * Our cpu supports DSP R2 ASE, so enable 356 * access to DSP R2 resources. 357 */ 358 if (env->CP0_Status & (1 << CP0St_MX)) { 359 env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2; 360 } 361 362 } else if (env->insn_flags & ASE_DSP) { 363 /* 364 * Our cpu supports DSP ASE, so enable 365 * access to DSP resources. 366 */ 367 if (env->CP0_Status & (1 << CP0St_MX)) { 368 env->hflags |= MIPS_HFLAG_DSP; 369 } 370 371 } 372 if (env->insn_flags & ISA_MIPS_R2) { 373 if (env->active_fpu.fcr0 & (1 << FCR0_F64)) { 374 env->hflags |= MIPS_HFLAG_COP1X; 375 } 376 } else if (env->insn_flags & ISA_MIPS_R1) { 377 if (env->hflags & MIPS_HFLAG_64) { 378 env->hflags |= MIPS_HFLAG_COP1X; 379 } 380 } else if (env->insn_flags & ISA_MIPS4) { 381 /* 382 * All supported MIPS IV CPUs use the XX (CU3) to enable 383 * and disable the MIPS IV extensions to the MIPS III ISA. 384 * Some other MIPS IV CPUs ignore the bit, so the check here 385 * would be too restrictive for them. 386 */ 387 if (env->CP0_Status & (1U << CP0St_CU3)) { 388 env->hflags |= MIPS_HFLAG_COP1X; 389 } 390 } 391 if (ase_msa_available(env)) { 392 if (env->CP0_Config5 & (1 << CP0C5_MSAEn)) { 393 env->hflags |= MIPS_HFLAG_MSA; 394 } 395 } 396 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) { 397 if (env->CP0_Config5 & (1 << CP0C5_FRE)) { 398 env->hflags |= MIPS_HFLAG_FRE; 399 } 400 } 401 if (env->CP0_Config3 & (1 << CP0C3_LPA)) { 402 if (env->CP0_PageGrain & (1 << CP0PG_ELPA)) { 403 env->hflags |= MIPS_HFLAG_ELPA; 404 } 405 } 406 } 407 408 void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc); 409 void cpu_mips_store_status(CPUMIPSState *env, target_ulong val); 410 void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val); 411 412 #endif 413