/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch CPU
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "sysemu/qtest.h"
#include "sysemu/tcg.h"
#include "sysemu/kvm.h"
#include "kvm/kvm_loongarch.h"
#include "exec/exec-all.h"
#include "cpu.h"
#include "internals.h"
#include "fpu/softfloat-helpers.h"
#include "cpu-csr.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/reset.h"
#endif
#include "vec.h"
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#include "tcg/tcg.h"
#endif

const char * const regnames[32] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

const char * const fregnames[32] = {
    "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
    "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
    "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
    "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
};

struct TypeExcp {
    int32_t exccode;
    const char * const name;
};

static const struct TypeExcp excp_names[] = {
    {EXCCODE_INT, "Interrupt"},
    {EXCCODE_PIL, "Page invalid exception for load"},
    {EXCCODE_PIS, "Page invalid exception for store"},
    {EXCCODE_PIF, "Page invalid exception for fetch"},
    {EXCCODE_PME, "Page modified exception"},
    {EXCCODE_PNR, "Page Not Readable exception"},
    {EXCCODE_PNX, "Page Not Executable exception"},
    {EXCCODE_PPI, "Page Privilege error"},
    {EXCCODE_ADEF, "Address error for instruction fetch"},
    {EXCCODE_ADEM, "Address error for Memory access"},
    {EXCCODE_SYS, "Syscall"},
    {EXCCODE_BRK, "Break"},
    {EXCCODE_INE, "Instruction Non-Existent"},
    {EXCCODE_IPE, "Instruction privilege error"},
    {EXCCODE_FPD, "Floating Point Disabled"},
    {EXCCODE_FPE, "Floating Point Exception"},
    {EXCCODE_DBP, "Debug breakpoint"},
    {EXCCODE_BCE, "Bound Check Exception"},
    {EXCCODE_SXD, "128 bit vector instructions Disable exception"},
    {EXCCODE_ASXD, "256 bit vector instructions Disable exception"},
    {EXCP_HLT, "EXCP_HLT"},
};

const char *loongarch_exception_name(int32_t exception)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(excp_names); i++) {
        if (excp_names[i].exccode == exception) {
            return excp_names[i].name;
        }
    }
    return "Unknown";
}

void G_NORETURN do_raise_exception(CPULoongArchState *env,
                                   uint32_t exception,
                                   uintptr_t pc)
{
    CPUState *cs = env_cpu(env);

    qemu_log_mask(CPU_LOG_INT, "%s: exception: %d (%s)\n",
                  __func__,
                  exception,
                  loongarch_exception_name(exception));
    cs->exception_index = exception;

    cpu_loop_exit_restore(cs, pc);
}

static void loongarch_cpu_set_pc(CPUState *cs, vaddr value)
{
    set_pc(cpu_env(cs), value);
}

static vaddr loongarch_cpu_get_pc(CPUState *cs)
{
    return cpu_env(cs)->pc;
}

#ifndef CONFIG_USER_ONLY
#include "hw/loongarch/virt.h"

void loongarch_cpu_set_irq(void *opaque, int irq, int level)
{
    LoongArchCPU *cpu = opaque;
    CPULoongArchState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    if (irq < 0 || irq >= N_IRQS) {
        return;
    }

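    /*
     * With KVM the interrupt is injected into the guest by the kernel;
     * under TCG we track it in CSR.ESTAT.IS and raise or clear the hard
     * interrupt line ourselves.
     */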
    if (kvm_enabled()) {
        kvm_loongarch_set_interrupt(cpu, irq, level);
    } else if (tcg_enabled()) {
        env->CSR_ESTAT = deposit64(env->CSR_ESTAT, irq, 1, level != 0);
        if (FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS)) {
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
}

static inline bool cpu_loongarch_hw_interrupts_enabled(CPULoongArchState *env)
{
    bool ret = 0;

    ret = (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE) &&
           !(FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)));

    return ret;
}

/* Check if there is pending and not masked out interrupt */
static inline bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env)
{
    uint32_t pending;
    uint32_t status;

    pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
    status = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

    return (pending & status) != 0;
}
#endif

#ifdef CONFIG_TCG
#ifndef CONFIG_USER_ONLY
static void loongarch_cpu_do_interrupt(CPUState *cs)
{
    CPULoongArchState *env = cpu_env(cs);
    bool update_badinstr = 1;
    int cause = -1;
    bool tlbfill = FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR);
    uint32_t vec_size = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, VS);

    if (cs->exception_index != EXCCODE_INT) {
        qemu_log_mask(CPU_LOG_INT,
                      "%s enter: pc " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " TLBRERA " TARGET_FMT_lx " exception: %d (%s)\n",
                      __func__, env->pc, env->CSR_ERA, env->CSR_TLBRERA,
                      cs->exception_index,
                      loongarch_exception_name(cs->exception_index));
    }

    switch (cs->exception_index) {
    case EXCCODE_DBP:
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DCL, 1);
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, ECODE, 0xC);
        goto set_DERA;
    set_DERA:
        env->CSR_DERA = env->pc;
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DST, 1);
        set_pc(env, env->CSR_EENTRY + 0x480);
        break;
    case EXCCODE_INT:
        if (FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DEI, 1);
            goto set_DERA;
        }
        QEMU_FALLTHROUGH;
    case EXCCODE_PIF:
    case EXCCODE_ADEF:
        cause = cs->exception_index;
        update_badinstr = 0;
        break;
    case EXCCODE_SYS:
    case EXCCODE_BRK:
    case EXCCODE_INE:
    case EXCCODE_IPE:
    case EXCCODE_FPD:
    case EXCCODE_FPE:
    case EXCCODE_SXD:
    case EXCCODE_ASXD:
        env->CSR_BADV = env->pc;
        QEMU_FALLTHROUGH;
    case EXCCODE_BCE:
    case EXCCODE_ADEM:
    case EXCCODE_PIL:
    case EXCCODE_PIS:
    case EXCCODE_PME:
    case EXCCODE_PNR:
    case EXCCODE_PNX:
    case EXCCODE_PPI:
        cause = cs->exception_index;
        break;
    default:
        qemu_log("Error: exception(%d) is not supported\n",
                 cs->exception_index);
        abort();
    }

    if (update_badinstr) {
        env->CSR_BADI = cpu_ldl_code(env, env->pc);
    }

    /* Save PLV and IE */
    if (tlbfill) {
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PPLV,
                                       FIELD_EX64(env->CSR_CRMD,
                                                  CSR_CRMD, PLV));
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PIE,
                                       FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        /* set the DA mode */
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA,
                                      PC, (env->pc >> 2));
    } else {
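        /*
         * Ordinary exception: record ECODE/ESUBCODE in ESTAT and save the
         * previous privilege level and interrupt enable into PRMD.
         */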
        env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ECODE,
                                    EXCODE_MCODE(cause));
        env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ESUBCODE,
                                    EXCODE_SUBCODE(cause));
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PPLV,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV));
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PIE,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        env->CSR_ERA = env->pc;
    }

    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);

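    /*
     * CSR.ECFG.VS selects the exception vector spacing: VS == 0 means all
     * exceptions share the single entry at EENTRY; otherwise entries are
     * 2^VS instructions, i.e. (1 << VS) * 4 bytes, apart.
     */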
    if (vec_size) {
        vec_size = (1 << vec_size) * 4;
    }

    if (cs->exception_index == EXCCODE_INT) {
        /* Interrupt */
        uint32_t vector = 0;
        uint32_t pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
        pending &= FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

        /* Find the highest-priority interrupt. */
        vector = 31 - clz32(pending);
        set_pc(env, env->CSR_EENTRY +
                    (EXCCODE_EXTERNAL_INT + vector) * vec_size);
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d\n" " A " TARGET_FMT_lx " D "
                      TARGET_FMT_lx " vector = %d ExC " TARGET_FMT_lx
                      " ExS " TARGET_FMT_lx "\n",
                      __func__, env->pc, env->CSR_ERA,
                      cause, env->CSR_BADV, env->CSR_DERA, vector,
                      env->CSR_ECFG, env->CSR_ESTAT);
    } else {
        if (tlbfill) {
            set_pc(env, env->CSR_TLBRENTRY);
        } else {
            set_pc(env, env->CSR_EENTRY + EXCODE_MCODE(cause) * vec_size);
        }
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d%s,\n ESTAT " TARGET_FMT_lx
                      " EXCFG " TARGET_FMT_lx " BADVA " TARGET_FMT_lx
                      " BADI " TARGET_FMT_lx " SYS_NUM " TARGET_FMT_lu
                      " cpu %d asid " TARGET_FMT_lx "\n", __func__, env->pc,
                      tlbfill ? env->CSR_TLBRERA : env->CSR_ERA,
                      cause, tlbfill ? "(refill)" : "", env->CSR_ESTAT,
                      env->CSR_ECFG,
                      tlbfill ? env->CSR_TLBRBADV : env->CSR_BADV,
                      env->CSR_BADI, env->gpr[11], cs->cpu_index,
                      env->CSR_ASID);
    }
    cs->exception_index = -1;
}

static void loongarch_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                                vaddr addr, unsigned size,
                                                MMUAccessType access_type,
                                                int mmu_idx, MemTxAttrs attrs,
                                                MemTxResult response,
                                                uintptr_t retaddr)
{
    CPULoongArchState *env = cpu_env(cs);

    if (access_type == MMU_INST_FETCH) {
        do_raise_exception(env, EXCCODE_ADEF, retaddr);
    } else {
        do_raise_exception(env, EXCCODE_ADEM, retaddr);
    }
}

static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        CPULoongArchState *env = cpu_env(cs);

        if (cpu_loongarch_hw_interrupts_enabled(env) &&
            cpu_loongarch_hw_interrupts_pending(env)) {
            /* Raise it */
            cs->exception_index = EXCCODE_INT;
            loongarch_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
#endif

static void loongarch_cpu_synchronize_from_tb(CPUState *cs,
                                              const TranslationBlock *tb)
{
    tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
    set_pc(cpu_env(cs), tb->pc);
}

static void loongarch_restore_state_to_opc(CPUState *cs,
                                           const TranslationBlock *tb,
                                           const uint64_t *data)
{
    set_pc(cpu_env(cs), data[0]);
}
#endif /* CONFIG_TCG */

static bool loongarch_cpu_has_work(CPUState *cs)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    bool has_work = false;

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
        cpu_loongarch_hw_interrupts_pending(cpu_env(cs))) {
        has_work = true;
    }

    return has_work;
#endif
}

static int loongarch_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    CPULoongArchState *env = cpu_env(cs);

    if (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG)) {
        return FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV);
    }
    return MMU_DA_IDX;
}

static void loongarch_la464_initfn(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;
    int i;

    for (i = 0; i < 21; i++) {
        env->cpucfg[i] = 0x0;
    }

    cpu->dtb_compatible = "loongarch,Loongson-3A5000";
    env->cpucfg[0] = 0x14c010;  /* PRID */

    uint32_t data = 0;
    data = FIELD_DP32(data, CPUCFG1, ARCH, 2);
    data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
    data = FIELD_DP32(data, CPUCFG1, PALEN, 0x2f);
    data = FIELD_DP32(data, CPUCFG1, VALEN, 0x2f);
    data = FIELD_DP32(data, CPUCFG1, UAL, 1);
    data = FIELD_DP32(data, CPUCFG1, RI, 1);
    data = FIELD_DP32(data, CPUCFG1, EP, 1);
    data = FIELD_DP32(data, CPUCFG1, RPLV, 1);
    data = FIELD_DP32(data, CPUCFG1, HP, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1);
    env->cpucfg[1] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG2, FP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_SP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_DP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LSX, 1);
    data = FIELD_DP32(data, CPUCFG2, LASX, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LSPW, 1);
    data = FIELD_DP32(data, CPUCFG2, LAM, 1);
    env->cpucfg[2] = data;

    env->cpucfg[4] = 100 * 1000 * 1000; /* Crystal frequency */

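    /*
     * Per the LoongArch spec, the constant timer frequency is the crystal
     * frequency above scaled by CC_MUL / CC_DIV; a 1/1 ratio keeps the
     * counter at 100 MHz.
     */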
    data = 0;
    data = FIELD_DP32(data, CPUCFG5, CC_MUL, 1);
    data = FIELD_DP32(data, CPUCFG5, CC_DIV, 1);
    env->cpucfg[5] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG16, L1_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L1_DPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRIV, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUINCL, 1);
    env->cpucfg[16] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG17, L1IU_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SIZE, 6);
    env->cpucfg[17] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG18, L1D_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG18, L1D_SETS, 8);
    data = FIELD_DP32(data, CPUCFG18, L1D_SIZE, 6);
    env->cpucfg[18] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG19, L2IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SIZE, 6);
    env->cpucfg[19] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG20, L3IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SETS, 14);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SIZE, 6);
    env->cpucfg[20] = data;

    env->CSR_ASID = FIELD_DP64(0, CSR_ASID, ASIDBITS, 0xa);
    loongarch_cpu_post_init(obj);
}

static void loongarch_la132_initfn(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;

    int i;

    for (i = 0; i < 21; i++) {
        env->cpucfg[i] = 0x0;
    }

    cpu->dtb_compatible = "loongarch,Loongson-1C103";
    env->cpucfg[0] = 0x148042;  /* PRID */

    uint32_t data = 0;
    data = FIELD_DP32(data, CPUCFG1, ARCH, 1); /* LA32 */
    data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
    data = FIELD_DP32(data, CPUCFG1, PALEN, 0x1f); /* 32 bits */
    data = FIELD_DP32(data, CPUCFG1, VALEN, 0x1f); /* 32 bits */
    data = FIELD_DP32(data, CPUCFG1, UAL, 1);
    data = FIELD_DP32(data, CPUCFG1, RI, 0);
    data = FIELD_DP32(data, CPUCFG1, EP, 0);
    data = FIELD_DP32(data, CPUCFG1, RPLV, 0);
    data = FIELD_DP32(data, CPUCFG1, HP, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1);
    env->cpucfg[1] = data;
}

static void loongarch_max_initfn(Object *obj)
{
    /* '-cpu max' for TCG: we use cpu la464. */
    loongarch_la464_initfn(obj);
}

static void loongarch_cpu_reset_hold(Object *obj)
{
    CPUState *cs = CPU(obj);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(obj);
    CPULoongArchState *env = cpu_env(cs);

    if (lacc->parent_phases.hold) {
        lacc->parent_phases.hold(obj);
    }

    env->fcsr0_mask = FCSR0_M1 | FCSR0_M2 | FCSR0_M3;
    env->fcsr0 = 0x0;

    int n;
    /* Set csr registers value after reset */
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATF, 1);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATM, 1);

    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, FPE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, SXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, ASXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, BTE, 0);

    env->CSR_MISC = 0;

    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, VS, 0);
    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, LIE, 0);

    env->CSR_ESTAT = env->CSR_ESTAT & (~MAKE_64BIT_MASK(0, 2));
    env->CSR_RVACFG = FIELD_DP64(env->CSR_RVACFG, CSR_RVACFG, RBITS, 0);
    env->CSR_CPUID = cs->cpu_index;
    env->CSR_TCFG = FIELD_DP64(env->CSR_TCFG, CSR_TCFG, EN, 0);
    env->CSR_LLBCTL = FIELD_DP64(env->CSR_LLBCTL, CSR_LLBCTL, KLO, 0);
    env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 0);
    env->CSR_MERRCTL = FIELD_DP64(env->CSR_MERRCTL, CSR_MERRCTL, ISMERR, 0);
    env->CSR_TID = cs->cpu_index;

    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, TLB_TYPE, 2);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, MTLB_ENTRY, 63);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_WAYS, 7);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_SETS, 8);

    for (n = 0; n < 4; n++) {
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV0, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV1, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV2, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV3, 0);
    }

#ifndef CONFIG_USER_ONLY
    env->pc = 0x1c000000;
    memset(env->tlb, 0, sizeof(env->tlb));
    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(env);
    }
#endif

#ifdef CONFIG_TCG
    restore_fp_status(env);
#endif
    cs->exception_index = -1;
}

static void loongarch_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    info->print_insn = print_insn_loongarch;
}

static void loongarch_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    loongarch_cpu_register_gdb_regs_for_features(cs);

    cpu_reset(cs);
    qemu_init_vcpu(cs);

    lacc->parent_realize(dev, errp);
}

static bool loongarch_get_lsx(Object *obj, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    bool ret;

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
        ret = true;
    } else {
        ret = false;
    }
    return ret;
}

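/*
 * LASX depends on LSX: clearing the "lsx" property below also clears
 * "lasx", and enabling "lasx" implicitly enables "lsx".
 */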
static void loongarch_set_lsx(Object *obj, bool value, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    if (value) {
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 1);
    } else {
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 0);
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 0);
    }
}

static bool loongarch_get_lasx(Object *obj, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    bool ret;

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) {
        ret = true;
    } else {
        ret = false;
    }
    return ret;
}

static void loongarch_set_lasx(Object *obj, bool value, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    if (value) {
        if (!FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
            cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 1);
        }
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 1);
    } else {
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 0);
    }
}

void loongarch_cpu_post_init(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
        object_property_add_bool(obj, "lsx", loongarch_get_lsx,
                                 loongarch_set_lsx);
    }
    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) {
        object_property_add_bool(obj, "lasx", loongarch_get_lasx,
                                 loongarch_set_lasx);
    }
}

static void loongarch_cpu_init(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    qdev_init_gpio_in(DEVICE(cpu), loongarch_cpu_set_irq, N_IRQS);
#ifdef CONFIG_TCG
    timer_init_ns(&cpu->timer, QEMU_CLOCK_VIRTUAL,
                  &loongarch_constant_timer_cb, cpu);
#endif
#endif
}

static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;

    oc = object_class_by_name(cpu_model);
    if (!oc) {
        g_autofree char *typename
            = g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model);
        oc = object_class_by_name(typename);
    }

    return oc;
}

void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPULoongArchState *env = cpu_env(cs);
    int i;

    qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
    qemu_fprintf(f, " FCSR0 0x%08x fp_status 0x%02x\n", env->fcsr0,
                 get_float_exception_flags(&env->fp_status));

    /* gpr */
    for (i = 0; i < 32; i++) {
        if ((i & 3) == 0) {
            qemu_fprintf(f, " GPR%02d:", i);
        }
        qemu_fprintf(f, " %s %016" PRIx64, regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }

    qemu_fprintf(f, "CRMD=%016" PRIx64 "\n", env->CSR_CRMD);
    qemu_fprintf(f, "PRMD=%016" PRIx64 "\n", env->CSR_PRMD);
    qemu_fprintf(f, "EUEN=%016" PRIx64 "\n", env->CSR_EUEN);
    qemu_fprintf(f, "ESTAT=%016" PRIx64 "\n", env->CSR_ESTAT);
    qemu_fprintf(f, "ERA=%016" PRIx64 "\n", env->CSR_ERA);
    qemu_fprintf(f, "BADV=%016" PRIx64 "\n", env->CSR_BADV);
    qemu_fprintf(f, "BADI=%016" PRIx64 "\n", env->CSR_BADI);
    qemu_fprintf(f, "EENTRY=%016" PRIx64 "\n", env->CSR_EENTRY);
    qemu_fprintf(f, "PRCFG1=%016" PRIx64 ", PRCFG2=%016" PRIx64 ","
                 " PRCFG3=%016" PRIx64 "\n",
                 env->CSR_PRCFG1, env->CSR_PRCFG2, env->CSR_PRCFG3);
    qemu_fprintf(f, "TLBRENTRY=%016" PRIx64 "\n", env->CSR_TLBRENTRY);
    qemu_fprintf(f, "TLBRBADV=%016" PRIx64 "\n", env->CSR_TLBRBADV);
    qemu_fprintf(f, "TLBRERA=%016" PRIx64 "\n", env->CSR_TLBRERA);
    qemu_fprintf(f, "TCFG=%016" PRIx64 "\n", env->CSR_TCFG);
    qemu_fprintf(f, "TVAL=%016" PRIx64 "\n", env->CSR_TVAL);

    /* fpr */
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %s %016" PRIx64, fregnames[i],
                         env->fpr[i].vreg.D(0));
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
}

#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"

static const TCGCPUOps loongarch_tcg_ops = {
    .initialize = loongarch_translate_init,
    .synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
    .restore_state_to_opc = loongarch_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = loongarch_cpu_tlb_fill,
    .cpu_exec_interrupt = loongarch_cpu_exec_interrupt,
    .do_interrupt = loongarch_cpu_do_interrupt,
    .do_transaction_failed = loongarch_cpu_do_transaction_failed,
#endif
};
#endif /* CONFIG_TCG */

#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps loongarch_sysemu_ops = {
    .get_phys_page_debug = loongarch_cpu_get_phys_page_debug,
};

static int64_t loongarch_cpu_get_arch_id(CPUState *cs)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);

    return cpu->phy_id;
}
#endif

static void loongarch_cpu_class_init(ObjectClass *c, void *data)
{
    LoongArchCPUClass *lacc = LOONGARCH_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, loongarch_cpu_realizefn,
                                    &lacc->parent_realize);
    resettable_class_set_parent_phases(rc, NULL, loongarch_cpu_reset_hold, NULL,
                                       &lacc->parent_phases);

    cc->class_by_name = loongarch_cpu_class_by_name;
    cc->has_work = loongarch_cpu_has_work;
    cc->mmu_index = loongarch_cpu_mmu_index;
    cc->dump_state = loongarch_cpu_dump_state;
    cc->set_pc = loongarch_cpu_set_pc;
    cc->get_pc = loongarch_cpu_get_pc;
#ifndef CONFIG_USER_ONLY
    cc->get_arch_id = loongarch_cpu_get_arch_id;
    dc->vmsd = &vmstate_loongarch_cpu;
    cc->sysemu_ops = &loongarch_sysemu_ops;
#endif
    cc->disas_set_info = loongarch_cpu_disas_set_info;
    cc->gdb_read_register = loongarch_cpu_gdb_read_register;
    cc->gdb_write_register = loongarch_cpu_gdb_write_register;
    cc->gdb_stop_before_watchpoint = true;

#ifdef CONFIG_TCG
    cc->tcg_ops = &loongarch_tcg_ops;
#endif
}

static const gchar *loongarch32_gdb_arch_name(CPUState *cs)
{
    return "loongarch32";
}

static void loongarch32_cpu_class_init(ObjectClass *c, void *data)
{
    CPUClass *cc = CPU_CLASS(c);

    cc->gdb_core_xml_file = "loongarch-base32.xml";
    cc->gdb_arch_name = loongarch32_gdb_arch_name;
}

static const gchar *loongarch64_gdb_arch_name(CPUState *cs)
{
    return "loongarch64";
}

static void loongarch64_cpu_class_init(ObjectClass *c, void *data)
{
    CPUClass *cc = CPU_CLASS(c);

    cc->gdb_core_xml_file = "loongarch-base64.xml";
    cc->gdb_arch_name = loongarch64_gdb_arch_name;
}

#define DEFINE_LOONGARCH_CPU_TYPE(size, model, initfn) \
    { \
        .parent = TYPE_LOONGARCH##size##_CPU, \
        .instance_init = initfn, \
        .name = LOONGARCH_CPU_TYPE_NAME(model), \
    }

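/*
 * Type registration: an abstract base class, abstract 32/64-bit subclasses
 * that select the GDB description, and the concrete la464, la132 and max
 * models.
 */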
static const TypeInfo loongarch_cpu_type_infos[] = {
    {
        .name = TYPE_LOONGARCH_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(LoongArchCPU),
        .instance_align = __alignof(LoongArchCPU),
        .instance_init = loongarch_cpu_init,

        .abstract = true,
        .class_size = sizeof(LoongArchCPUClass),
        .class_init = loongarch_cpu_class_init,
    },
    {
        .name = TYPE_LOONGARCH32_CPU,
        .parent = TYPE_LOONGARCH_CPU,

        .abstract = true,
        .class_init = loongarch32_cpu_class_init,
    },
    {
        .name = TYPE_LOONGARCH64_CPU,
        .parent = TYPE_LOONGARCH_CPU,

        .abstract = true,
        .class_init = loongarch64_cpu_class_init,
    },
    DEFINE_LOONGARCH_CPU_TYPE(64, "la464", loongarch_la464_initfn),
    DEFINE_LOONGARCH_CPU_TYPE(32, "la132", loongarch_la132_initfn),
    DEFINE_LOONGARCH_CPU_TYPE(64, "max", loongarch_max_initfn),
};

DEFINE_TYPES(loongarch_cpu_type_infos)