/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch CPU
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "sysemu/qtest.h"
#include "exec/exec-all.h"
#include "qapi/qapi-commands-machine-target.h"
#include "cpu.h"
#include "internals.h"
#include "fpu/softfloat-helpers.h"
#include "cpu-csr.h"
#include "sysemu/reset.h"

const char * const regnames[32] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

const char * const fregnames[32] = {
    "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
    "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
    "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
    "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
};

static const char * const excp_names[] = {
    [EXCCODE_INT] = "Interrupt",
    [EXCCODE_PIL] = "Page invalid exception for load",
    [EXCCODE_PIS] = "Page invalid exception for store",
    [EXCCODE_PIF] = "Page invalid exception for fetch",
    [EXCCODE_PME] = "Page modified exception",
    [EXCCODE_PNR] = "Page Not Readable exception",
    [EXCCODE_PNX] = "Page Not Executable exception",
    [EXCCODE_PPI] = "Page Privilege error",
    [EXCCODE_ADEF] = "Address error for instruction fetch",
    [EXCCODE_ADEM] = "Address error for Memory access",
    [EXCCODE_SYS] = "Syscall",
    [EXCCODE_BRK] = "Break",
    [EXCCODE_INE] = "Instruction Non-Existent",
    [EXCCODE_IPE] = "Instruction privilege error",
    [EXCCODE_FPD] = "Floating Point Disabled",
    [EXCCODE_FPE] = "Floating Point Exception",
    [EXCCODE_DBP] = "Debug breakpoint",
    [EXCCODE_BCE] = "Bound Check Exception",
};

const char *loongarch_exception_name(int32_t exception)
{
    assert(excp_names[exception]);
    return excp_names[exception];
}

void G_NORETURN do_raise_exception(CPULoongArchState *env,
                                   uint32_t exception,
                                   uintptr_t pc)
{
    CPUState *cs = env_cpu(env);

    qemu_log_mask(CPU_LOG_INT, "%s: %d (%s)\n",
                  __func__,
                  exception,
                  loongarch_exception_name(exception));
    cs->exception_index = exception;

    cpu_loop_exit_restore(cs, pc);
}

static void loongarch_cpu_set_pc(CPUState *cs, vaddr value)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    env->pc = value;
}

static vaddr loongarch_cpu_get_pc(CPUState *cs)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    return env->pc;
}

#ifndef CONFIG_USER_ONLY
#include "hw/loongarch/virt.h"

void loongarch_cpu_set_irq(void *opaque, int irq, int level)
{
    LoongArchCPU *cpu = opaque;
    CPULoongArchState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    if (irq < 0 || irq >= N_IRQS) {
        return;
    }

    env->CSR_ESTAT = deposit64(env->CSR_ESTAT, irq, 1, level != 0);

    if (FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS)) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }
}
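
/*
 * Hardware interrupts are deliverable only when the global enable bit
 * CSR.CRMD.IE is set and the core is not in debug mode (CSR.DBG.DST
 * clear).
 */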
static inline bool cpu_loongarch_hw_interrupts_enabled(CPULoongArchState *env)
{
    bool ret = 0;

    ret = (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE) &&
          !(FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)));

    return ret;
}

/* Check if there is pending and not masked out interrupt */
static inline bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env)
{
    uint32_t pending;
    uint32_t status;

    pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
    status = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

    return (pending & status) != 0;
}

static void loongarch_cpu_do_interrupt(CPUState *cs)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    bool update_badinstr = 1;
    int cause = -1;
    const char *name;
    bool tlbfill = FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR);
    uint32_t vec_size = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, VS);

    if (cs->exception_index != EXCCODE_INT) {
        if (cs->exception_index < 0 ||
            cs->exception_index >= ARRAY_SIZE(excp_names)) {
            name = "unknown";
        } else {
            name = excp_names[cs->exception_index];
        }

        qemu_log_mask(CPU_LOG_INT,
                      "%s enter: pc " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " TLBRERA " TARGET_FMT_lx " %s exception\n", __func__,
                      env->pc, env->CSR_ERA, env->CSR_TLBRERA, name);
    }

    switch (cs->exception_index) {
    case EXCCODE_DBP:
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DCL, 1);
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, ECODE, 0xC);
        goto set_DERA;
    set_DERA:
        env->CSR_DERA = env->pc;
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DST, 1);
        env->pc = env->CSR_EENTRY + 0x480;
        break;
    case EXCCODE_INT:
        if (FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DEI, 1);
            goto set_DERA;
        }
        QEMU_FALLTHROUGH;
    case EXCCODE_PIF:
    case EXCCODE_ADEF:
        cause = cs->exception_index;
        update_badinstr = 0;
        break;
    case EXCCODE_SYS:
    case EXCCODE_BRK:
    case EXCCODE_INE:
    case EXCCODE_IPE:
    case EXCCODE_FPD:
    case EXCCODE_FPE:
    case EXCCODE_BCE:
        env->CSR_BADV = env->pc;
        QEMU_FALLTHROUGH;
    case EXCCODE_ADEM:
    case EXCCODE_PIL:
    case EXCCODE_PIS:
    case EXCCODE_PME:
    case EXCCODE_PNR:
    case EXCCODE_PNX:
    case EXCCODE_PPI:
        cause = cs->exception_index;
        break;
    default:
        qemu_log("Error: exception(%d) has not been supported\n",
                 cs->exception_index);
        abort();
    }

    if (update_badinstr) {
        env->CSR_BADI = cpu_ldl_code(env, env->pc);
    }

    /* Save PLV and IE */
    if (tlbfill) {
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PPLV,
                                       FIELD_EX64(env->CSR_CRMD,
                                       CSR_CRMD, PLV));
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PIE,
                                       FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        /* set the DA mode */
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA,
                                      PC, (env->pc >> 2));
    } else {
        env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ECODE,
                                    EXCODE_MCODE(cause));
        env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ESUBCODE,
                                    EXCODE_SUBCODE(cause));
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PPLV,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV));
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PIE,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        env->CSR_ERA = env->pc;
    }

    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);
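
    /*
     * With vectored entries enabled (CSR.ECFG.VS != 0) each exception and
     * interrupt gets its own slot, spaced (1 << VS) * 4 bytes apart.  As a
     * worked example (assuming VS = 7): the spacing is 512 bytes, so a
     * pending interrupt whose highest enabled bit index is 3 dispatches to
     * CSR_EENTRY + (EXCCODE_EXTERNAL_INT + 3) * 512, matching the
     * computation below.
     */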

    if (vec_size) {
        vec_size = (1 << vec_size) * 4;
    }

    if (cs->exception_index == EXCCODE_INT) {
        /* Interrupt */
        uint32_t vector = 0;
        uint32_t pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
        pending &= FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

        /* Find the highest-priority interrupt. */
        vector = 31 - clz32(pending);
        env->pc = env->CSR_EENTRY + (EXCCODE_EXTERNAL_INT + vector) * vec_size;
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d\n" " A " TARGET_FMT_lx " D "
                      TARGET_FMT_lx " vector = %d ExC " TARGET_FMT_lx " ExS "
                      TARGET_FMT_lx "\n",
                      __func__, env->pc, env->CSR_ERA,
                      cause, env->CSR_BADV, env->CSR_DERA, vector,
                      env->CSR_ECFG, env->CSR_ESTAT);
    } else {
        if (tlbfill) {
            env->pc = env->CSR_TLBRENTRY;
        } else {
            env->pc = env->CSR_EENTRY;
            env->pc += EXCODE_MCODE(cause) * vec_size;
        }
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d%s,\n ESTAT " TARGET_FMT_lx
                      " EXCFG " TARGET_FMT_lx " BADVA " TARGET_FMT_lx
                      " BADI " TARGET_FMT_lx " SYS_NUM " TARGET_FMT_lu
                      " cpu %d asid " TARGET_FMT_lx "\n", __func__, env->pc,
                      tlbfill ? env->CSR_TLBRERA : env->CSR_ERA,
                      cause, tlbfill ? "(refill)" : "", env->CSR_ESTAT,
                      env->CSR_ECFG,
                      tlbfill ? env->CSR_TLBRBADV : env->CSR_BADV,
                      env->CSR_BADI, env->gpr[11], cs->cpu_index,
                      env->CSR_ASID);
    }
    cs->exception_index = -1;
}

static void loongarch_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                                vaddr addr, unsigned size,
                                                MMUAccessType access_type,
                                                int mmu_idx, MemTxAttrs attrs,
                                                MemTxResult response,
                                                uintptr_t retaddr)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    if (access_type == MMU_INST_FETCH) {
        do_raise_exception(env, EXCCODE_ADEF, retaddr);
    } else {
        do_raise_exception(env, EXCCODE_ADEM, retaddr);
    }
}

static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        LoongArchCPU *cpu = LOONGARCH_CPU(cs);
        CPULoongArchState *env = &cpu->env;

        if (cpu_loongarch_hw_interrupts_enabled(env) &&
            cpu_loongarch_hw_interrupts_pending(env)) {
            /* Raise it */
            cs->exception_index = EXCCODE_INT;
            loongarch_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
#endif

#ifdef CONFIG_TCG
static void loongarch_cpu_synchronize_from_tb(CPUState *cs,
                                              const TranslationBlock *tb)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    env->pc = tb_pc(tb);
}

static void loongarch_restore_state_to_opc(CPUState *cs,
                                           const TranslationBlock *tb,
                                           const uint64_t *data)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    env->pc = data[0];
}
#endif /* CONFIG_TCG */

static bool loongarch_cpu_has_work(CPUState *cs)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    bool has_work = false;

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
        cpu_loongarch_hw_interrupts_pending(env)) {
        has_work = true;
    }

    return has_work;
#endif
}
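
/*
 * LA464 (Loongson 3A5000) model: the cpucfg[] words filled in below are
 * what the guest reads back through the CPUCFG instruction; any word not
 * set here is left as zero by the initial loop.
 */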
static void loongarch_la464_initfn(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;
    int i;

    for (i = 0; i < 21; i++) {
        env->cpucfg[i] = 0x0;
    }

    cpu->dtb_compatible = "loongarch,Loongson-3A5000";
    env->cpucfg[0] = 0x14c010;  /* PRID */

    uint32_t data = 0;
    data = FIELD_DP32(data, CPUCFG1, ARCH, 2);
    data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
    data = FIELD_DP32(data, CPUCFG1, PALEN, 0x2f);
    data = FIELD_DP32(data, CPUCFG1, VALEN, 0x2f);
    data = FIELD_DP32(data, CPUCFG1, UAL, 1);
    data = FIELD_DP32(data, CPUCFG1, RI, 1);
    data = FIELD_DP32(data, CPUCFG1, EP, 1);
    data = FIELD_DP32(data, CPUCFG1, RPLV, 1);
    data = FIELD_DP32(data, CPUCFG1, HP, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1);
    env->cpucfg[1] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG2, FP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_SP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_DP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LAM, 1);
    env->cpucfg[2] = data;

    env->cpucfg[4] = 100 * 1000 * 1000; /* Crystal frequency */

    data = 0;
    data = FIELD_DP32(data, CPUCFG5, CC_MUL, 1);
    data = FIELD_DP32(data, CPUCFG5, CC_DIV, 1);
    env->cpucfg[5] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG16, L1_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L1_DPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRIV, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUINCL, 1);
    env->cpucfg[16] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG17, L1IU_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SIZE, 6);
    env->cpucfg[17] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG18, L1D_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG18, L1D_SETS, 8);
    data = FIELD_DP32(data, CPUCFG18, L1D_SIZE, 6);
    env->cpucfg[18] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG19, L2IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SIZE, 6);
    env->cpucfg[19] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG20, L3IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SETS, 14);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SIZE, 6);
    env->cpucfg[20] = data;

    env->CSR_ASID = FIELD_DP64(0, CSR_ASID, ASIDBITS, 0xa);
}

static void loongarch_cpu_list_entry(gpointer data, gpointer user_data)
{
    const char *typename = object_class_get_name(OBJECT_CLASS(data));

    qemu_printf("%s\n", typename);
}

void loongarch_cpu_list(void)
{
    GSList *list;
    list = object_class_get_list_sorted(TYPE_LOONGARCH_CPU, false);
    g_slist_foreach(list, loongarch_cpu_list_entry, NULL);
    g_slist_free(list);
}
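
/*
 * Architectural reset state: privilege level PLV0 with direct address
 * translation (CRMD.DA = 1, PG = 0), interrupts masked, and the FP/SIMD
 * units disabled via CSR.EUEN; the individual field values are set below.
 */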
static void loongarch_cpu_reset_hold(Object *obj)
{
    CPUState *cs = CPU(obj);
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(cpu);
    CPULoongArchState *env = &cpu->env;

    if (lacc->parent_phases.hold) {
        lacc->parent_phases.hold(obj);
    }

    env->fcsr0_mask = FCSR0_M1 | FCSR0_M2 | FCSR0_M3;
    env->fcsr0 = 0x0;

    int n;
    /* Set csr registers value after reset */
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATF, 1);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATM, 1);

    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, FPE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, SXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, ASXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, BTE, 0);

    env->CSR_MISC = 0;

    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, VS, 0);
    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, LIE, 0);

    env->CSR_ESTAT = env->CSR_ESTAT & (~MAKE_64BIT_MASK(0, 2));
    env->CSR_RVACFG = FIELD_DP64(env->CSR_RVACFG, CSR_RVACFG, RBITS, 0);
    env->CSR_TCFG = FIELD_DP64(env->CSR_TCFG, CSR_TCFG, EN, 0);
    env->CSR_LLBCTL = FIELD_DP64(env->CSR_LLBCTL, CSR_LLBCTL, KLO, 0);
    env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 0);
    env->CSR_MERRCTL = FIELD_DP64(env->CSR_MERRCTL, CSR_MERRCTL, ISMERR, 0);

    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, TLB_TYPE, 2);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, MTLB_ENTRY, 63);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_WAYS, 7);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_SETS, 8);

    for (n = 0; n < 4; n++) {
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV0, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV1, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV2, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV3, 0);
    }

#ifndef CONFIG_USER_ONLY
    env->pc = 0x1c000000;
    memset(env->tlb, 0, sizeof(env->tlb));
#endif

    restore_fp_status(env);
    cs->exception_index = -1;
}

static void loongarch_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    info->print_insn = print_insn_loongarch;
}

static void loongarch_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    loongarch_cpu_register_gdb_regs_for_features(cs);

    cpu_reset(cs);
    qemu_init_vcpu(cs);

    lacc->parent_realize(dev, errp);
}

#ifndef CONFIG_USER_ONLY
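/*
 * Backend for the "iocsr_misc" region wired up in loongarch_cpu_init():
 * writes are ignored and reads return fixed identification values.
 * For example, an IOCSR read of VENDOR_REG returns 0x6e6f73676e6f6f4c,
 * which is the ASCII string "Loongson" in little-endian byte order.
 */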
static void loongarch_qemu_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}

static uint64_t loongarch_qemu_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr) {
    case FEATURE_REG:
        return 1ULL << IOCSRF_MSI | 1ULL << IOCSRF_EXTIOI |
               1ULL << IOCSRF_CSRIPI;
    case VENDOR_REG:
        return 0x6e6f73676e6f6f4cULL; /* "Loongson" */
    case CPUNAME_REG:
        return 0x303030354133ULL;     /* "3A5000" */
    case MISC_FUNC_REG:
        return 1ULL << IOCSRM_EXTIOI_EN;
    }
    return 0ULL;
}

static const MemoryRegionOps loongarch_qemu_ops = {
    .read = loongarch_qemu_read,
    .write = loongarch_qemu_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
#endif

static void loongarch_cpu_init(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    cpu_set_cpustate_pointers(cpu);

#ifndef CONFIG_USER_ONLY
    CPULoongArchState *env = &cpu->env;
    qdev_init_gpio_in(DEVICE(cpu), loongarch_cpu_set_irq, N_IRQS);
    timer_init_ns(&cpu->timer, QEMU_CLOCK_VIRTUAL,
                  &loongarch_constant_timer_cb, cpu);
    memory_region_init_io(&env->system_iocsr, OBJECT(cpu), NULL,
                          env, "iocsr", UINT64_MAX);
    address_space_init(&env->address_space_iocsr, &env->system_iocsr, "IOCSR");
    memory_region_init_io(&env->iocsr_mem, OBJECT(cpu), &loongarch_qemu_ops,
                          NULL, "iocsr_misc", 0x428);
    memory_region_add_subregion(&env->system_iocsr, 0, &env->iocsr_mem);
#endif
}

static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;

    oc = object_class_by_name(cpu_model);
    if (!oc) {
        g_autofree char *typename
            = g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model);
        oc = object_class_by_name(typename);
        if (!oc) {
            return NULL;
        }
    }

    if (object_class_dynamic_cast(oc, TYPE_LOONGARCH_CPU)
        && !object_class_is_abstract(oc)) {
        return oc;
    }
    return NULL;
}

void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    int i;

    qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
    qemu_fprintf(f, " FCSR0 0x%08x fp_status 0x%02x\n", env->fcsr0,
                 get_float_exception_flags(&env->fp_status));

    /* gpr */
    for (i = 0; i < 32; i++) {
        if ((i & 3) == 0) {
            qemu_fprintf(f, " GPR%02d:", i);
        }
        qemu_fprintf(f, " %s %016" PRIx64, regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }

    qemu_fprintf(f, "CRMD=%016" PRIx64 "\n", env->CSR_CRMD);
    qemu_fprintf(f, "PRMD=%016" PRIx64 "\n", env->CSR_PRMD);
    qemu_fprintf(f, "EUEN=%016" PRIx64 "\n", env->CSR_EUEN);
    qemu_fprintf(f, "ESTAT=%016" PRIx64 "\n", env->CSR_ESTAT);
    qemu_fprintf(f, "ERA=%016" PRIx64 "\n", env->CSR_ERA);
    qemu_fprintf(f, "BADV=%016" PRIx64 "\n", env->CSR_BADV);
    qemu_fprintf(f, "BADI=%016" PRIx64 "\n", env->CSR_BADI);
    qemu_fprintf(f, "EENTRY=%016" PRIx64 "\n", env->CSR_EENTRY);
    qemu_fprintf(f, "PRCFG1=%016" PRIx64 ", PRCFG2=%016" PRIx64 ","
                 " PRCFG3=%016" PRIx64 "\n",
                 env->CSR_PRCFG1, env->CSR_PRCFG2, env->CSR_PRCFG3);
    qemu_fprintf(f, "TLBRENTRY=%016" PRIx64 "\n", env->CSR_TLBRENTRY);
    qemu_fprintf(f, "TLBRBADV=%016" PRIx64 "\n", env->CSR_TLBRBADV);
    qemu_fprintf(f, "TLBRERA=%016" PRIx64 "\n", env->CSR_TLBRERA);

    /* fpr */
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %s %016" PRIx64, fregnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
}
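
/*
 * TCG hooks for this target: loongarch_translate_init() sets up the
 * translator, while the remaining callbacks let the common execution
 * loop resynchronize the PC, take interrupts and convert failed bus
 * transactions into address error exceptions (system emulation only).
 */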
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"

static struct TCGCPUOps loongarch_tcg_ops = {
    .initialize = loongarch_translate_init,
    .synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
    .restore_state_to_opc = loongarch_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = loongarch_cpu_tlb_fill,
    .cpu_exec_interrupt = loongarch_cpu_exec_interrupt,
    .do_interrupt = loongarch_cpu_do_interrupt,
    .do_transaction_failed = loongarch_cpu_do_transaction_failed,
#endif
};
#endif /* CONFIG_TCG */

#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps loongarch_sysemu_ops = {
    .get_phys_page_debug = loongarch_cpu_get_phys_page_debug,
};
#endif

static gchar *loongarch_gdb_arch_name(CPUState *cs)
{
    return g_strdup("loongarch64");
}

static void loongarch_cpu_class_init(ObjectClass *c, void *data)
{
    LoongArchCPUClass *lacc = LOONGARCH_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, loongarch_cpu_realizefn,
                                    &lacc->parent_realize);
    resettable_class_set_parent_phases(rc, NULL, loongarch_cpu_reset_hold, NULL,
                                       &lacc->parent_phases);

    cc->class_by_name = loongarch_cpu_class_by_name;
    cc->has_work = loongarch_cpu_has_work;
    cc->dump_state = loongarch_cpu_dump_state;
    cc->set_pc = loongarch_cpu_set_pc;
    cc->get_pc = loongarch_cpu_get_pc;
#ifndef CONFIG_USER_ONLY
    dc->vmsd = &vmstate_loongarch_cpu;
    cc->sysemu_ops = &loongarch_sysemu_ops;
#endif
    cc->disas_set_info = loongarch_cpu_disas_set_info;
    cc->gdb_read_register = loongarch_cpu_gdb_read_register;
    cc->gdb_write_register = loongarch_cpu_gdb_write_register;
    cc->gdb_num_core_regs = 35;
    cc->gdb_core_xml_file = "loongarch-base64.xml";
    cc->gdb_stop_before_watchpoint = true;
    cc->gdb_arch_name = loongarch_gdb_arch_name;

#ifdef CONFIG_TCG
    cc->tcg_ops = &loongarch_tcg_ops;
#endif
}

#define DEFINE_LOONGARCH_CPU_TYPE(model, initfn) \
    { \
        .parent = TYPE_LOONGARCH_CPU, \
        .instance_init = initfn, \
        .name = LOONGARCH_CPU_TYPE_NAME(model), \
    }

static const TypeInfo loongarch_cpu_type_infos[] = {
    {
        .name = TYPE_LOONGARCH_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(LoongArchCPU),
        .instance_init = loongarch_cpu_init,

        .abstract = true,
        .class_size = sizeof(LoongArchCPUClass),
        .class_init = loongarch_cpu_class_init,
    },
    DEFINE_LOONGARCH_CPU_TYPE("la464", loongarch_la464_initfn),
};

DEFINE_TYPES(loongarch_cpu_type_infos)

static void loongarch_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfo *info = g_new0(CpuDefinitionInfo, 1);
    const char *typename = object_class_get_name(oc);

    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_LOONGARCH_CPU));
    info->q_typename = g_strdup(typename);

    QAPI_LIST_PREPEND(*cpu_list, info);
}

CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_LOONGARCH_CPU, false);
    g_slist_foreach(list, loongarch_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}