/*
 * MIPS internal definitions and helpers
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef MIPS_INTERNAL_H
#define MIPS_INTERNAL_H

#include "exec/memattrs.h"
#include "fpu/softfloat-helpers.h"

/*
 * MMU types, the first four entries have the same layout as the
 * CP0C0_MT field.
 */
enum mips_mmu_types {
    MMU_TYPE_NONE       = 0,
    MMU_TYPE_R4000      = 1,    /* Standard TLB */
    MMU_TYPE_BAT        = 2,    /* Block Address Translation */
    MMU_TYPE_FMT        = 3,    /* Fixed Mapping */
    MMU_TYPE_DVF        = 4,    /* Dual VTLB and FTLB */
    MMU_TYPE_R3000,
    MMU_TYPE_R6000,
    MMU_TYPE_R8000
};

struct mips_def_t {
    const char *name;
    int32_t CP0_PRid;
    int32_t CP0_Config0;
    int32_t CP0_Config1;
    int32_t CP0_Config2;
    int32_t CP0_Config3;
    int32_t CP0_Config4;
    int32_t CP0_Config4_rw_bitmask;
    int32_t CP0_Config5;
    int32_t CP0_Config5_rw_bitmask;
    int32_t CP0_Config6;
    int32_t CP0_Config6_rw_bitmask;
    int32_t CP0_Config7;
    int32_t CP0_Config7_rw_bitmask;
    target_ulong CP0_LLAddr_rw_bitmask;
    int CP0_LLAddr_shift;
    int32_t SYNCI_Step;
    int32_t CCRes;
    int32_t CP0_Status_rw_bitmask;
    int32_t CP0_TCStatus_rw_bitmask;
    int32_t CP0_SRSCtl;
    int32_t CP1_fcr0;
    int32_t CP1_fcr31_rw_bitmask;
    int32_t CP1_fcr31;
    int32_t MSAIR;
    int32_t SEGBITS;
    int32_t PABITS;
    int32_t CP0_SRSConf0_rw_bitmask;
    int32_t CP0_SRSConf0;
    int32_t CP0_SRSConf1_rw_bitmask;
    int32_t CP0_SRSConf1;
    int32_t CP0_SRSConf2_rw_bitmask;
    int32_t CP0_SRSConf2;
    int32_t CP0_SRSConf3_rw_bitmask;
    int32_t CP0_SRSConf3;
    int32_t CP0_SRSConf4_rw_bitmask;
    int32_t CP0_SRSConf4;
    int32_t CP0_PageGrain_rw_bitmask;
    int32_t CP0_PageGrain;
    target_ulong CP0_EBaseWG_rw_bitmask;
    uint64_t insn_flags;
    enum mips_mmu_types mmu_type;
    int32_t SAARP;
};

extern const struct mips_def_t mips_defs[];
extern const int mips_defs_number;

enum CPUMIPSMSADataFormat {
    DF_BYTE = 0,
    DF_HALF,
    DF_WORD,
    DF_DOUBLE
};

void mips_cpu_do_interrupt(CPUState *cpu);
bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req);
void mips_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int mips_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int mips_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
                                  MMUAccessType access_type,
                                  int mmu_idx, uintptr_t retaddr);

#if !defined(CONFIG_USER_ONLY)

typedef struct r4k_tlb_t r4k_tlb_t;
struct r4k_tlb_t {
    target_ulong VPN;
    uint32_t PageMask;
    uint16_t ASID;
    uint32_t MMID;
    unsigned int G:1;
    unsigned int C0:3;
    unsigned int C1:3;
    unsigned int V0:1;
    unsigned int V1:1;
    unsigned int D0:1;
    unsigned int D1:1;
    unsigned int XI0:1;
    unsigned int XI1:1;
    unsigned int RI0:1;
    unsigned int RI1:1;
    unsigned int EHINV:1;
    uint64_t PFN[2];
};

struct CPUMIPSTLBContext {
    uint32_t nb_tlb;
    uint32_t tlb_in_use;
    int (*map_address)(struct CPUMIPSState *env, hwaddr *physical, int *prot,
                       target_ulong address, int rw, int access_type);
    void (*helper_tlbwi)(struct CPUMIPSState *env);
    void (*helper_tlbwr)(struct CPUMIPSState *env);
    void (*helper_tlbp)(struct CPUMIPSState *env);
    void (*helper_tlbr)(struct CPUMIPSState *env);
    void (*helper_tlbinv)(struct CPUMIPSState *env);
    void (*helper_tlbinvf)(struct CPUMIPSState *env);
    union {
        struct {
            r4k_tlb_t tlb[MIPS_TLB_MAX];
        } r4k;
    } mmu;
};
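/*
 * The prototypes below are the MMU hook implementations that a CPU model
 * can install into its CPUMIPSTLBContext according to the MMU type it
 * reports.  A rough sketch of the R4000-style wiring (the actual setup
 * lives in the per-model MMU init code; env->tlb is the TLB context
 * pointer held in CPUMIPSState):
 *
 *     env->tlb->map_address = &r4k_map_address;
 *     env->tlb->helper_tlbwi = r4k_helper_tlbwi;
 *     env->tlb->helper_tlbwr = r4k_helper_tlbwr;
 *     env->tlb->helper_tlbp = r4k_helper_tlbp;
 *     env->tlb->helper_tlbr = r4k_helper_tlbr;
 */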
int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                       target_ulong address, int rw, int access_type);
int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                          target_ulong address, int rw, int access_type);
int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                    target_ulong address, int rw, int access_type);
void r4k_helper_tlbwi(CPUMIPSState *env);
void r4k_helper_tlbwr(CPUMIPSState *env);
void r4k_helper_tlbp(CPUMIPSState *env);
void r4k_helper_tlbr(CPUMIPSState *env);
void r4k_helper_tlbinv(CPUMIPSState *env);
void r4k_helper_tlbinvf(CPUMIPSState *env);
void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra);
uint32_t cpu_mips_get_random(CPUMIPSState *env);

void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                    vaddr addr, unsigned size,
                                    MMUAccessType access_type,
                                    int mmu_idx, MemTxAttrs attrs,
                                    MemTxResult response, uintptr_t retaddr);
hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address,
                                  int rw);
#endif

#define cpu_signal_handler cpu_mips_signal_handler

#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_mips_cpu;
#endif

static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env)
{
    return (env->CP0_Status & (1 << CP0St_IE)) &&
        !(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM) &&
        /*
         * Note that the TCStatus IXMT field is initialized to zero,
         * and only MT capable cores can set it to one. So we don't
         * need to check for MT capabilities here.
         */
        !(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_IXMT));
}

/* Check if there is a pending and not masked-out interrupt */
static inline bool cpu_mips_hw_interrupts_pending(CPUMIPSState *env)
{
    int32_t pending;
    int32_t status;
    bool r;

    pending = env->CP0_Cause & CP0Ca_IP_mask;
    status = env->CP0_Status & CP0Ca_IP_mask;

    if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
        /*
         * A MIPS configured with a vectorizing external interrupt controller
         * will feed a vector into the Cause pending lines. The core treats
         * the status lines as a vector level, not as individual masks.
         */
        r = pending > status;
    } else {
        /*
         * A MIPS configured with compatibility or VInt (Vectored Interrupts)
         * treats the pending lines as individual interrupt lines and the
         * status lines as individual masks.
         */
        r = (pending & status) != 0;
    }
    return r;
}
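/*
 * Hardware interrupt delivery needs both of the checks above to pass;
 * a simplified sketch of what the mips_cpu_exec_interrupt() path does:
 *
 *     if (cpu_mips_hw_interrupts_enabled(env) &&
 *         cpu_mips_hw_interrupts_pending(env)) {
 *         // raise EXCP_EXT_INTERRUPT and deliver via mips_cpu_do_interrupt()
 *     }
 */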
void mips_tcg_init(void);

/* cp0_timer.c */
uint32_t cpu_mips_get_count(CPUMIPSState *env);
void cpu_mips_store_count(CPUMIPSState *env, uint32_t value);
void cpu_mips_store_compare(CPUMIPSState *env, uint32_t value);
void cpu_mips_start_count(CPUMIPSState *env);
void cpu_mips_stop_count(CPUMIPSState *env);

/* helper.c */
bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr);

/* op_helper.c */
uint32_t float_class_s(uint32_t arg, float_status *fst);
uint64_t float_class_d(uint64_t arg, float_status *fst);

extern const FloatRoundMode ieee_rm[4];

void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask);

static inline void restore_rounding_mode(CPUMIPSState *env)
{
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3],
                            &env->active_fpu.fp_status);
}

static inline void restore_flush_mode(CPUMIPSState *env)
{
    set_flush_to_zero((env->active_fpu.fcr31 & (1 << FCR31_FS)) != 0,
                      &env->active_fpu.fp_status);
}

static inline void restore_snan_bit_mode(CPUMIPSState *env)
{
    set_snan_bit_is_one((env->active_fpu.fcr31 & (1 << FCR31_NAN2008)) == 0,
                        &env->active_fpu.fp_status);
}

static inline void restore_fp_status(CPUMIPSState *env)
{
    restore_rounding_mode(env);
    restore_flush_mode(env);
    restore_snan_bit_mode(env);
}

static inline void restore_msa_fp_status(CPUMIPSState *env)
{
    float_status *status = &env->active_tc.msa_fp_status;
    int rounding_mode = (env->active_tc.msacsr & MSACSR_RM_MASK) >> MSACSR_RM;
    bool flush_to_zero = (env->active_tc.msacsr & MSACSR_FS_MASK) != 0;

    set_float_rounding_mode(ieee_rm[rounding_mode], status);
    set_flush_to_zero(flush_to_zero, status);
    set_flush_inputs_to_zero(flush_to_zero, status);
}

static inline void restore_pamask(CPUMIPSState *env)
{
    if (env->hflags & MIPS_HFLAG_ELPA) {
        env->PAMask = (1ULL << env->PABITS) - 1;
    } else {
        env->PAMask = PAMASK_BASE;
    }
}
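/*
 * The restore_* helpers above re-derive the cached softfloat and PAMask
 * state from the architectural control registers, so they need to be
 * re-run after those registers change.  As a rough sketch, a CTC1 write
 * to FCR31 would end with something like the following (new_fcr31 is a
 * placeholder; the real helper also raises newly enabled FP exceptions):
 *
 *     env->active_fpu.fcr31 = new_fcr31;
 *     restore_fp_status(env);
 */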
static inline int mips_vpe_active(CPUMIPSState *env)
{
    int active = 1;

    /* Check that the VPE is enabled. */
    if (!(env->mvp->CP0_MVPControl & (1 << CP0MVPCo_EVP))) {
        active = 0;
    }
    /* Check that the VPE is activated. */
    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))) {
        active = 0;
    }

    /*
     * Now verify that there are active thread contexts in the VPE.
     *
     * This assumes the CPU model will internally reschedule threads
     * if the active one goes to sleep. If there are no threads available
     * the active one will be in a sleeping state, and we can turn off
     * the entire VPE.
     */
    if (!(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_A))) {
        /* TC is not activated. */
        active = 0;
    }
    if (env->active_tc.CP0_TCHalt & 1) {
        /* TC is in halt state. */
        active = 0;
    }

    return active;
}

static inline int mips_vp_active(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;

    /* Check if the VP disabled other VPs (which means the VP is enabled) */
    if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
        return 1;
    }

    /* Check if the virtual processor is disabled due to a DVP */
    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        if ((&other_cpu->env != env) &&
            ((other_cpu->env.CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
            return 0;
        }
    }
    return 1;
}

static inline void compute_hflags(CPUMIPSState *env)
{
    env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
                     MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
                     MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 |
                     MIPS_HFLAG_DSP_R3 | MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA |
                     MIPS_HFLAG_FRE | MIPS_HFLAG_ELPA | MIPS_HFLAG_ERL);
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        env->hflags |= MIPS_HFLAG_ERL;
    }
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM)) {
        env->hflags |= (env->CP0_Status >> CP0St_KSU) &
                       MIPS_HFLAG_KSU;
    }
#if defined(TARGET_MIPS64)
    if ((env->insn_flags & ISA_MIPS3) &&
        (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
         (env->CP0_Status & (1 << CP0St_PX)) ||
         (env->CP0_Status & (1 << CP0St_UX)))) {
        env->hflags |= MIPS_HFLAG_64;
    }

    if (!(env->insn_flags & ISA_MIPS3)) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) &&
               !(env->CP0_Status & (1 << CP0St_UX))) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (env->insn_flags & ISA_MIPS64R6) {
        /* Address wrapping for Supervisor and Kernel is specified in R6 */
        if ((((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_SM) &&
             !(env->CP0_Status & (1 << CP0St_SX))) ||
            (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_KM) &&
             !(env->CP0_Status & (1 << CP0St_KX)))) {
            env->hflags |= MIPS_HFLAG_AWRAP;
        }
    }
#endif
    if (((env->CP0_Status & (1 << CP0St_CU0)) &&
         !(env->insn_flags & ISA_MIPS32R6)) ||
        !(env->hflags & MIPS_HFLAG_KSU)) {
        env->hflags |= MIPS_HFLAG_CP0;
    }
    if (env->CP0_Status & (1 << CP0St_CU1)) {
        env->hflags |= MIPS_HFLAG_FPU;
    }
    if (env->CP0_Status & (1 << CP0St_FR)) {
        env->hflags |= MIPS_HFLAG_F64;
    }
    if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_KM) &&
        (env->CP0_Config5 & (1 << CP0C5_SBRI))) {
        env->hflags |= MIPS_HFLAG_SBRI;
    }
    if (env->insn_flags & ASE_DSP_R3) {
        /*
         * Our cpu supports DSP R3 ASE, so enable
         * access to DSP R3 resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 |
                           MIPS_HFLAG_DSP_R3;
        }
    } else if (env->insn_flags & ASE_DSP_R2) {
        /*
         * Our cpu supports DSP R2 ASE, so enable
         * access to DSP R2 resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2;
        }
    } else if (env->insn_flags & ASE_DSP) {
        /*
         * Our cpu supports DSP ASE, so enable
         * access to DSP resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP;
        }
    }
    if (env->insn_flags & ISA_MIPS32R2) {
        if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS32) {
        if (env->hflags & MIPS_HFLAG_64) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS4) {
        /*
         * All supported MIPS IV CPUs use the XX (CU3) bit to enable
         * and disable the MIPS IV extensions to the MIPS III ISA.
         * Some other MIPS IV CPUs ignore the bit, so the check here
         * would be too restrictive for them.
         */
        if (env->CP0_Status & (1U << CP0St_CU3)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    }
    if (env->insn_flags & ASE_MSA) {
        if (env->CP0_Config5 & (1 << CP0C5_MSAEn)) {
            env->hflags |= MIPS_HFLAG_MSA;
        }
    }
    if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
        if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
            env->hflags |= MIPS_HFLAG_FRE;
        }
    }
    if (env->CP0_Config3 & (1 << CP0C3_LPA)) {
        if (env->CP0_PageGrain & (1 << CP0PG_ELPA)) {
            env->hflags |= MIPS_HFLAG_ELPA;
        }
    }
}
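/*
 * Note on compute_hflags(): the hflags computed above are a cache of CP0,
 * Config and FPU control state used by the translator, so the function is
 * expected to be re-run whenever any of its inputs change (e.g. writes to
 * CP0 Status, Config5 or PageGrain, or exception entry/return); otherwise
 * translated code would run with stale flags.
 */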
void cpu_mips_tlb_flush(CPUMIPSState *env);
void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc);
void cpu_mips_store_status(CPUMIPSState *env, target_ulong val);
void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val);

void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, uint32_t exception,
                                          int error_code, uintptr_t pc);

static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
                                                    uint32_t exception,
                                                    uintptr_t pc)
{
    do_raise_exception_err(env, exception, 0, pc);
}

#endif