/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch CPU
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "sysemu/qtest.h"
#include "exec/exec-all.h"
#include "cpu.h"
#include "internals.h"
#include "fpu/softfloat-helpers.h"
#include "cpu-csr.h"
#include "sysemu/reset.h"
#include "tcg/tcg.h"

const char * const regnames[32] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

const char * const fregnames[32] = {
    "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
    "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
    "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
    "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
};

static const char * const excp_names[] = {
    [EXCCODE_INT] = "Interrupt",
    [EXCCODE_PIL] = "Page invalid exception for load",
    [EXCCODE_PIS] = "Page invalid exception for store",
    [EXCCODE_PIF] = "Page invalid exception for fetch",
    [EXCCODE_PME] = "Page modified exception",
    [EXCCODE_PNR] = "Page Not Readable exception",
    [EXCCODE_PNX] = "Page Not Executable exception",
    [EXCCODE_PPI] = "Page Privilege error",
    [EXCCODE_ADEF] = "Address error for instruction fetch",
    [EXCCODE_ADEM] = "Address error for Memory access",
    [EXCCODE_SYS] = "Syscall",
    [EXCCODE_BRK] = "Break",
    [EXCCODE_INE] = "Instruction Non-Existent",
    [EXCCODE_IPE] = "Instruction privilege error",
    [EXCCODE_FPD] = "Floating Point Disabled",
    [EXCCODE_FPE] = "Floating Point Exception",
    [EXCCODE_DBP] = "Debug breakpoint",
    [EXCCODE_BCE] = "Bound Check Exception",
    [EXCCODE_SXD] = "128 bit vector instructions Disable exception",
};

const char *loongarch_exception_name(int32_t exception)
{
    assert(excp_names[exception]);
    return excp_names[exception];
}

void G_NORETURN do_raise_exception(CPULoongArchState *env,
                                   uint32_t exception,
                                   uintptr_t pc)
{
    CPUState *cs = env_cpu(env);

    qemu_log_mask(CPU_LOG_INT, "%s: %d (%s)\n",
                  __func__,
                  exception,
                  loongarch_exception_name(exception));
    cs->exception_index = exception;

    cpu_loop_exit_restore(cs, pc);
}

static void loongarch_cpu_set_pc(CPUState *cs, vaddr value)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    set_pc(env, value);
}

static vaddr loongarch_cpu_get_pc(CPUState *cs)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    return env->pc;
}

#ifndef CONFIG_USER_ONLY
#include "hw/loongarch/virt.h"

void loongarch_cpu_set_irq(void *opaque, int irq, int level)
{
    LoongArchCPU *cpu = opaque;
    CPULoongArchState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    if (irq < 0 || irq >= N_IRQS) {
        return;
    }

    env->CSR_ESTAT = deposit64(env->CSR_ESTAT, irq, 1, level != 0);

    if (FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS)) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }
}

static inline bool cpu_loongarch_hw_interrupts_enabled(CPULoongArchState *env)
{
    bool ret = 0;

    ret = (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE) &&
          !(FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)));

    return ret;
}

/* Check if there is pending and not masked out interrupt */
static inline bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env)
{
    uint32_t pending;
    uint32_t status;

    pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
    status = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

    return (pending & status) != 0;
}

static void loongarch_cpu_do_interrupt(CPUState *cs)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    bool update_badinstr = 1;
    int cause = -1;
    const char *name;
    bool tlbfill = FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR);
    uint32_t vec_size = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, VS);

    if (cs->exception_index != EXCCODE_INT) {
        if (cs->exception_index < 0 ||
            cs->exception_index >= ARRAY_SIZE(excp_names)) {
            name = "unknown";
        } else {
            name = excp_names[cs->exception_index];
        }

        qemu_log_mask(CPU_LOG_INT,
                     "%s enter: pc " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                     " TLBRERA " TARGET_FMT_lx " %s exception\n", __func__,
                     env->pc, env->CSR_ERA, env->CSR_TLBRERA, name);
    }

    switch (cs->exception_index) {
    case EXCCODE_DBP:
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DCL, 1);
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, ECODE, 0xC);
        goto set_DERA;
    set_DERA:
        env->CSR_DERA = env->pc;
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DST, 1);
        set_pc(env, env->CSR_EENTRY + 0x480);
        break;
    case EXCCODE_INT:
        if (FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DEI, 1);
            goto set_DERA;
        }
        QEMU_FALLTHROUGH;
    case EXCCODE_PIF:
    case EXCCODE_ADEF:
        cause = cs->exception_index;
        update_badinstr = 0;
        break;
    case EXCCODE_SYS:
    case EXCCODE_BRK:
    case EXCCODE_INE:
    case EXCCODE_IPE:
    case EXCCODE_FPD:
    case EXCCODE_FPE:
    case EXCCODE_SXD:
        env->CSR_BADV = env->pc;
        QEMU_FALLTHROUGH;
    case EXCCODE_BCE:
    case EXCCODE_ADEM:
    case EXCCODE_PIL:
    case EXCCODE_PIS:
    case EXCCODE_PME:
    case EXCCODE_PNR:
    case EXCCODE_PNX:
    case EXCCODE_PPI:
        cause = cs->exception_index;
        break;
    default:
        qemu_log("Error: exception(%d) has not been supported\n",
                 cs->exception_index);
        abort();
    }

    if (update_badinstr) {
        env->CSR_BADI = cpu_ldl_code(env, env->pc);
    }

    /* Save PLV and IE */
    if (tlbfill) {
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PPLV,
                                       FIELD_EX64(env->CSR_CRMD,
                                       CSR_CRMD, PLV));
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PIE,
                                       FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        /* set the DA mode */
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA,
                                      PC, (env->pc >> 2));
    } else {
        env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ECODE,
                                    EXCODE_MCODE(cause));
        env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ESUBCODE,
                                    EXCODE_SUBCODE(cause));
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PPLV,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV));
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PIE,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        env->CSR_ERA = env->pc;
    }

    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);

    if (vec_size) {
        vec_size = (1 << vec_size) * 4;
    }

    if (cs->exception_index == EXCCODE_INT) {
        /* Interrupt */
        uint32_t vector = 0;
        uint32_t pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
        pending &= FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

        /* Find the highest-priority interrupt. */
        vector = 31 - clz32(pending);
        set_pc(env, env->CSR_EENTRY + \
               (EXCCODE_EXTERNAL_INT + vector) * vec_size);
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d\n" " A " TARGET_FMT_lx " D "
                      TARGET_FMT_lx " vector = %d ExC " TARGET_FMT_lx "ExS"
                      TARGET_FMT_lx "\n",
                      __func__, env->pc, env->CSR_ERA,
                      cause, env->CSR_BADV, env->CSR_DERA, vector,
                      env->CSR_ECFG, env->CSR_ESTAT);
    } else {
        if (tlbfill) {
            set_pc(env, env->CSR_TLBRENTRY);
        } else {
            set_pc(env, env->CSR_EENTRY + EXCODE_MCODE(cause) * vec_size);
        }
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d%s\n, ESTAT " TARGET_FMT_lx
                      " EXCFG " TARGET_FMT_lx " BADVA " TARGET_FMT_lx
                      "BADI " TARGET_FMT_lx " SYS_NUM " TARGET_FMT_lu
                      " cpu %d asid " TARGET_FMT_lx "\n", __func__, env->pc,
                      tlbfill ? env->CSR_TLBRERA : env->CSR_ERA,
                      cause, tlbfill ? "(refill)" : "", env->CSR_ESTAT,
                      env->CSR_ECFG,
                      tlbfill ? env->CSR_TLBRBADV : env->CSR_BADV,
                      env->CSR_BADI, env->gpr[11], cs->cpu_index,
                      env->CSR_ASID);
    }
    cs->exception_index = -1;
}

static void loongarch_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                                vaddr addr, unsigned size,
                                                MMUAccessType access_type,
                                                int mmu_idx, MemTxAttrs attrs,
                                                MemTxResult response,
                                                uintptr_t retaddr)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    if (access_type == MMU_INST_FETCH) {
        do_raise_exception(env, EXCCODE_ADEF, retaddr);
    } else {
        do_raise_exception(env, EXCCODE_ADEM, retaddr);
    }
}

static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        LoongArchCPU *cpu = LOONGARCH_CPU(cs);
        CPULoongArchState *env = &cpu->env;

        if (cpu_loongarch_hw_interrupts_enabled(env) &&
            cpu_loongarch_hw_interrupts_pending(env)) {
            /* Raise it */
            cs->exception_index = EXCCODE_INT;
            loongarch_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
#endif

#ifdef CONFIG_TCG
static void loongarch_cpu_synchronize_from_tb(CPUState *cs,
                                              const TranslationBlock *tb)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
    set_pc(env, tb->pc);
}

static void loongarch_restore_state_to_opc(CPUState *cs,
                                           const TranslationBlock *tb,
                                           const uint64_t *data)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    set_pc(env, data[0]);
}
#endif /* CONFIG_TCG */

static bool loongarch_cpu_has_work(CPUState *cs)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    bool has_work = false;

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
        cpu_loongarch_hw_interrupts_pending(env)) {
        has_work = true;
    }

    return has_work;
#endif
}

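/*
 * Per-model instance_init hooks: each fills env->cpucfg[] with the feature
 * words the guest reads back via the CPUCFG instruction. la464 is the
 * 64-bit core used in the Loongson 3A5000.
 */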
static void loongarch_la464_initfn(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;
    int i;

    for (i = 0; i < 21; i++) {
        env->cpucfg[i] = 0x0;
    }

    cpu->dtb_compatible = "loongarch,Loongson-3A5000";
    env->cpucfg[0] = 0x14c010;  /* PRID */

    uint32_t data = 0;
    data = FIELD_DP32(data, CPUCFG1, ARCH, 2);
    data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
    data = FIELD_DP32(data, CPUCFG1, PALEN, 0x2f);
    data = FIELD_DP32(data, CPUCFG1, VALEN, 0x2f);
    data = FIELD_DP32(data, CPUCFG1, UAL, 1);
    data = FIELD_DP32(data, CPUCFG1, RI, 1);
    data = FIELD_DP32(data, CPUCFG1, EP, 1);
    data = FIELD_DP32(data, CPUCFG1, RPLV, 1);
    data = FIELD_DP32(data, CPUCFG1, HP, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1);
    env->cpucfg[1] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG2, FP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_SP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_DP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LSX, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LSPW, 1);
    data = FIELD_DP32(data, CPUCFG2, LAM, 1);
    env->cpucfg[2] = data;

    env->cpucfg[4] = 100 * 1000 * 1000; /* Crystal frequency */

    data = 0;
    data = FIELD_DP32(data, CPUCFG5, CC_MUL, 1);
    data = FIELD_DP32(data, CPUCFG5, CC_DIV, 1);
    env->cpucfg[5] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG16, L1_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L1_DPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRIV, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUINCL, 1);
    env->cpucfg[16] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG17, L1IU_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SIZE, 6);
    env->cpucfg[17] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG18, L1D_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG18, L1D_SETS, 8);
    data = FIELD_DP32(data, CPUCFG18, L1D_SIZE, 6);
    env->cpucfg[18] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG19, L2IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SIZE, 6);
    env->cpucfg[19] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG20, L3IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SETS, 14);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SIZE, 6);
    env->cpucfg[20] = data;

    env->CSR_ASID = FIELD_DP64(0, CSR_ASID, ASIDBITS, 0xa);
}

static void loongarch_la132_initfn(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;

    int i;

    for (i = 0; i < 21; i++) {
        env->cpucfg[i] = 0x0;
    }

    cpu->dtb_compatible = "loongarch,Loongson-1C103";
    env->cpucfg[0] = 0x148042;  /* PRID */

    uint32_t data = 0;
    data = FIELD_DP32(data, CPUCFG1, ARCH, 1); /* LA32 */
    data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
    data = FIELD_DP32(data, CPUCFG1, PALEN, 0x1f); /* 32 bits */
    data = FIELD_DP32(data, CPUCFG1, VALEN, 0x1f); /* 32 bits */
    data = FIELD_DP32(data, CPUCFG1, UAL, 1);
    data = FIELD_DP32(data, CPUCFG1, RI, 0);
    data = FIELD_DP32(data, CPUCFG1, EP, 0);
    data = FIELD_DP32(data, CPUCFG1, RPLV, 0);
    data = FIELD_DP32(data, CPUCFG1, HP, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1);
    env->cpucfg[1] = data;
}

static void loongarch_cpu_list_entry(gpointer data, gpointer user_data)
{
    const char *typename = object_class_get_name(OBJECT_CLASS(data));

    qemu_printf("%s\n", typename);
}

void loongarch_cpu_list(void)
{
    GSList *list;
    list = object_class_get_list_sorted(TYPE_LOONGARCH_CPU, false);
    g_slist_foreach(list, loongarch_cpu_list_entry, NULL);
    g_slist_free(list);
}

static void loongarch_cpu_reset_hold(Object *obj)
{
    CPUState *cs = CPU(obj);
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(cpu);
    CPULoongArchState *env = &cpu->env;

    if (lacc->parent_phases.hold) {
        lacc->parent_phases.hold(obj);
    }

    env->fcsr0_mask = FCSR0_M1 | FCSR0_M2 | FCSR0_M3;
    env->fcsr0 = 0x0;

    int n;
    /* Set csr registers value after reset */
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATF, 1);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATM, 1);

    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, FPE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, SXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, ASXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, BTE, 0);

    env->CSR_MISC = 0;

    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, VS, 0);
    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, LIE, 0);

    env->CSR_ESTAT = env->CSR_ESTAT & (~MAKE_64BIT_MASK(0, 2));
    env->CSR_RVACFG = FIELD_DP64(env->CSR_RVACFG, CSR_RVACFG, RBITS, 0);
    env->CSR_TCFG = FIELD_DP64(env->CSR_TCFG, CSR_TCFG, EN, 0);
    env->CSR_LLBCTL = FIELD_DP64(env->CSR_LLBCTL, CSR_LLBCTL, KLO, 0);
    env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 0);
    env->CSR_MERRCTL = FIELD_DP64(env->CSR_MERRCTL, CSR_MERRCTL, ISMERR, 0);

    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, TLB_TYPE, 2);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, MTLB_ENTRY, 63);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_WAYS, 7);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_SETS, 8);

    for (n = 0; n < 4; n++) {
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV0, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV1, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV2, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV3, 0);
    }

#ifndef CONFIG_USER_ONLY
    env->pc = 0x1c000000;
    memset(env->tlb, 0, sizeof(env->tlb));
#endif

    restore_fp_status(env);
    cs->exception_index = -1;
}

static void loongarch_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    info->print_insn = print_insn_loongarch;
}

static void loongarch_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    loongarch_cpu_register_gdb_regs_for_features(cs);

    cpu_reset(cs);
    qemu_init_vcpu(cs);

    lacc->parent_realize(dev, errp);
}

#ifndef CONFIG_USER_ONLY
static void loongarch_qemu_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    qemu_log_mask(LOG_UNIMP, "[%s]: Unimplemented reg 0x%" HWADDR_PRIx "\n",
                  __func__, addr);
}

static uint64_t loongarch_qemu_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr) {
    case VERSION_REG:
        return 0x11ULL;
    case FEATURE_REG:
        return 1ULL << IOCSRF_MSI | 1ULL << IOCSRF_EXTIOI |
               1ULL << IOCSRF_CSRIPI;
    case VENDOR_REG:
        return 0x6e6f73676e6f6f4cULL; /* "Loongson" */
    case CPUNAME_REG:
        return 0x303030354133ULL;     /* "3A5000" */
    case MISC_FUNC_REG:
        return 1ULL << IOCSRM_EXTIOI_EN;
    }
    return 0ULL;
}

static const MemoryRegionOps loongarch_qemu_ops = {
    .read = loongarch_qemu_read,
    .write = loongarch_qemu_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
#endif

static void loongarch_cpu_init(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    cpu_set_cpustate_pointers(cpu);

#ifndef CONFIG_USER_ONLY
    CPULoongArchState *env = &cpu->env;
    qdev_init_gpio_in(DEVICE(cpu), loongarch_cpu_set_irq, N_IRQS);
    timer_init_ns(&cpu->timer, QEMU_CLOCK_VIRTUAL,
                  &loongarch_constant_timer_cb, cpu);
    memory_region_init_io(&env->system_iocsr, OBJECT(cpu), NULL,
                          env, "iocsr", UINT64_MAX);
    address_space_init(&env->address_space_iocsr, &env->system_iocsr, "IOCSR");
    memory_region_init_io(&env->iocsr_mem, OBJECT(cpu), &loongarch_qemu_ops,
                          NULL, "iocsr_misc", 0x428);
    memory_region_add_subregion(&env->system_iocsr, 0, &env->iocsr_mem);
#endif
}

static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;

    oc = object_class_by_name(cpu_model);
    if (!oc) {
        g_autofree char *typename
            = g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model);
        oc = object_class_by_name(typename);
        if (!oc) {
            return NULL;
        }
    }

    if (object_class_dynamic_cast(oc, TYPE_LOONGARCH_CPU)
        && !object_class_is_abstract(oc)) {
        return oc;
    }
    return NULL;
}

void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    int i;

    qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
    qemu_fprintf(f, " FCSR0 0x%08x fp_status 0x%02x\n", env->fcsr0,
                 get_float_exception_flags(&env->fp_status));

    /* gpr */
    for (i = 0; i < 32; i++) {
        if ((i & 3) == 0) {
            qemu_fprintf(f, " GPR%02d:", i);
        }
        qemu_fprintf(f, " %s %016" PRIx64, regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }

    qemu_fprintf(f, "CRMD=%016" PRIx64 "\n", env->CSR_CRMD);
    qemu_fprintf(f, "PRMD=%016" PRIx64 "\n", env->CSR_PRMD);
    qemu_fprintf(f, "EUEN=%016" PRIx64 "\n", env->CSR_EUEN);
    qemu_fprintf(f, "ESTAT=%016" PRIx64 "\n", env->CSR_ESTAT);
    qemu_fprintf(f, "ERA=%016" PRIx64 "\n", env->CSR_ERA);
qemu_fprintf(f, "BADV=%016" PRIx64 "\n", env->CSR_BADV); 682 qemu_fprintf(f, "BADI=%016" PRIx64 "\n", env->CSR_BADI); 683 qemu_fprintf(f, "EENTRY=%016" PRIx64 "\n", env->CSR_EENTRY); 684 qemu_fprintf(f, "PRCFG1=%016" PRIx64 ", PRCFG2=%016" PRIx64 "," 685 " PRCFG3=%016" PRIx64 "\n", 686 env->CSR_PRCFG1, env->CSR_PRCFG3, env->CSR_PRCFG3); 687 qemu_fprintf(f, "TLBRENTRY=%016" PRIx64 "\n", env->CSR_TLBRENTRY); 688 qemu_fprintf(f, "TLBRBADV=%016" PRIx64 "\n", env->CSR_TLBRBADV); 689 qemu_fprintf(f, "TLBRERA=%016" PRIx64 "\n", env->CSR_TLBRERA); 690 691 /* fpr */ 692 if (flags & CPU_DUMP_FPU) { 693 for (i = 0; i < 32; i++) { 694 qemu_fprintf(f, " %s %016" PRIx64, fregnames[i], env->fpr[i].vreg.D(0)); 695 if ((i & 3) == 3) { 696 qemu_fprintf(f, "\n"); 697 } 698 } 699 } 700 } 701 702 #ifdef CONFIG_TCG 703 #include "hw/core/tcg-cpu-ops.h" 704 705 static struct TCGCPUOps loongarch_tcg_ops = { 706 .initialize = loongarch_translate_init, 707 .synchronize_from_tb = loongarch_cpu_synchronize_from_tb, 708 .restore_state_to_opc = loongarch_restore_state_to_opc, 709 710 #ifndef CONFIG_USER_ONLY 711 .tlb_fill = loongarch_cpu_tlb_fill, 712 .cpu_exec_interrupt = loongarch_cpu_exec_interrupt, 713 .do_interrupt = loongarch_cpu_do_interrupt, 714 .do_transaction_failed = loongarch_cpu_do_transaction_failed, 715 #endif 716 }; 717 #endif /* CONFIG_TCG */ 718 719 #ifndef CONFIG_USER_ONLY 720 #include "hw/core/sysemu-cpu-ops.h" 721 722 static const struct SysemuCPUOps loongarch_sysemu_ops = { 723 .get_phys_page_debug = loongarch_cpu_get_phys_page_debug, 724 }; 725 726 static int64_t loongarch_cpu_get_arch_id(CPUState *cs) 727 { 728 LoongArchCPU *cpu = LOONGARCH_CPU(cs); 729 730 return cpu->phy_id; 731 } 732 #endif 733 734 static void loongarch_cpu_class_init(ObjectClass *c, void *data) 735 { 736 LoongArchCPUClass *lacc = LOONGARCH_CPU_CLASS(c); 737 CPUClass *cc = CPU_CLASS(c); 738 DeviceClass *dc = DEVICE_CLASS(c); 739 ResettableClass *rc = RESETTABLE_CLASS(c); 740 741 device_class_set_parent_realize(dc, loongarch_cpu_realizefn, 742 &lacc->parent_realize); 743 resettable_class_set_parent_phases(rc, NULL, loongarch_cpu_reset_hold, NULL, 744 &lacc->parent_phases); 745 746 cc->class_by_name = loongarch_cpu_class_by_name; 747 cc->has_work = loongarch_cpu_has_work; 748 cc->dump_state = loongarch_cpu_dump_state; 749 cc->set_pc = loongarch_cpu_set_pc; 750 cc->get_pc = loongarch_cpu_get_pc; 751 #ifndef CONFIG_USER_ONLY 752 cc->get_arch_id = loongarch_cpu_get_arch_id; 753 dc->vmsd = &vmstate_loongarch_cpu; 754 cc->sysemu_ops = &loongarch_sysemu_ops; 755 #endif 756 cc->disas_set_info = loongarch_cpu_disas_set_info; 757 cc->gdb_read_register = loongarch_cpu_gdb_read_register; 758 cc->gdb_write_register = loongarch_cpu_gdb_write_register; 759 cc->gdb_stop_before_watchpoint = true; 760 761 #ifdef CONFIG_TCG 762 cc->tcg_ops = &loongarch_tcg_ops; 763 #endif 764 } 765 766 static gchar *loongarch32_gdb_arch_name(CPUState *cs) 767 { 768 return g_strdup("loongarch32"); 769 } 770 771 static void loongarch32_cpu_class_init(ObjectClass *c, void *data) 772 { 773 CPUClass *cc = CPU_CLASS(c); 774 775 cc->gdb_num_core_regs = 35; 776 cc->gdb_core_xml_file = "loongarch-base32.xml"; 777 cc->gdb_arch_name = loongarch32_gdb_arch_name; 778 } 779 780 static gchar *loongarch64_gdb_arch_name(CPUState *cs) 781 { 782 return g_strdup("loongarch64"); 783 } 784 785 static void loongarch64_cpu_class_init(ObjectClass *c, void *data) 786 { 787 CPUClass *cc = CPU_CLASS(c); 788 789 cc->gdb_num_core_regs = 35; 790 cc->gdb_core_xml_file = "loongarch-base64.xml"; 
    cc->gdb_arch_name = loongarch64_gdb_arch_name;
}

#define DEFINE_LOONGARCH_CPU_TYPE(size, model, initfn) \
    { \
        .parent = TYPE_LOONGARCH##size##_CPU, \
        .instance_init = initfn, \
        .name = LOONGARCH_CPU_TYPE_NAME(model), \
    }

static const TypeInfo loongarch_cpu_type_infos[] = {
    {
        .name = TYPE_LOONGARCH_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(LoongArchCPU),
        .instance_init = loongarch_cpu_init,

        .abstract = true,
        .class_size = sizeof(LoongArchCPUClass),
        .class_init = loongarch_cpu_class_init,
    },
    {
        .name = TYPE_LOONGARCH32_CPU,
        .parent = TYPE_LOONGARCH_CPU,

        .abstract = true,
        .class_init = loongarch32_cpu_class_init,
    },
    {
        .name = TYPE_LOONGARCH64_CPU,
        .parent = TYPE_LOONGARCH_CPU,

        .abstract = true,
        .class_init = loongarch64_cpu_class_init,
    },
    DEFINE_LOONGARCH_CPU_TYPE(64, "la464", loongarch_la464_initfn),
    DEFINE_LOONGARCH_CPU_TYPE(32, "la132", loongarch_la132_initfn),
};

DEFINE_TYPES(loongarch_cpu_type_infos)