/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch CPU
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "sysemu/qtest.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "cpu.h"
#include "internals.h"
#include "fpu/softfloat-helpers.h"
#include "cpu-csr.h"
#include "sysemu/reset.h"
#include "tcg/tcg.h"
#include "vec.h"

const char * const regnames[32] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

const char * const fregnames[32] = {
    "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
    "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
    "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
    "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
};

static const char * const excp_names[] = {
    [EXCCODE_INT] = "Interrupt",
    [EXCCODE_PIL] = "Page invalid exception for load",
    [EXCCODE_PIS] = "Page invalid exception for store",
    [EXCCODE_PIF] = "Page invalid exception for fetch",
    [EXCCODE_PME] = "Page modified exception",
    [EXCCODE_PNR] = "Page Not Readable exception",
    [EXCCODE_PNX] = "Page Not Executable exception",
    [EXCCODE_PPI] = "Page Privilege error",
    [EXCCODE_ADEF] = "Address error for instruction fetch",
    [EXCCODE_ADEM] = "Address error for Memory access",
    [EXCCODE_SYS] = "Syscall",
    [EXCCODE_BRK] = "Break",
    [EXCCODE_INE] = "Instruction Non-Existent",
    [EXCCODE_IPE] = "Instruction privilege error",
    [EXCCODE_FPD] = "Floating Point Disabled",
    [EXCCODE_FPE] = "Floating Point Exception",
    [EXCCODE_DBP] = "Debug breakpoint",
    [EXCCODE_BCE] = "Bound Check Exception",
    [EXCCODE_SXD] = "128 bit vector instructions Disable exception",
    [EXCCODE_ASXD] = "256 bit vector instructions Disable exception",
};

const char *loongarch_exception_name(int32_t exception)
{
    assert(excp_names[exception]);
    return excp_names[exception];
}

void G_NORETURN do_raise_exception(CPULoongArchState *env,
                                   uint32_t exception,
                                   uintptr_t pc)
{
    CPUState *cs = env_cpu(env);

    qemu_log_mask(CPU_LOG_INT, "%s: %d (%s)\n",
                  __func__,
                  exception,
                  loongarch_exception_name(exception));
    cs->exception_index = exception;

    cpu_loop_exit_restore(cs, pc);
}

static void loongarch_cpu_set_pc(CPUState *cs, vaddr value)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    set_pc(env, value);
}

static vaddr loongarch_cpu_get_pc(CPUState *cs)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    return env->pc;
}

#ifndef CONFIG_USER_ONLY
#include "hw/loongarch/virt.h"

void loongarch_cpu_set_irq(void *opaque, int irq, int level)
{
    LoongArchCPU *cpu = opaque;
    CPULoongArchState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    if (irq < 0 || irq >= N_IRQS) {
        return;
    }

    env->CSR_ESTAT = deposit64(env->CSR_ESTAT, irq, 1, level != 0);

    if (FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS)) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }
}

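/*
 * Hardware interrupts can only be taken when CRMD.IE is set and the CPU is
 * not in debug mode (DBG.DST clear); an individual interrupt line is
 * deliverable when it is both pending in ESTAT.IS and unmasked in ECFG.LIE.
 */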
static inline bool cpu_loongarch_hw_interrupts_enabled(CPULoongArchState *env)
{
    bool ret = 0;

    ret = (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE) &&
           !(FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)));

    return ret;
}

/* Check if there is pending and not masked out interrupt */
static inline bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env)
{
    uint32_t pending;
    uint32_t status;

    pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
    status = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

    return (pending & status) != 0;
}

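/*
 * Exception/interrupt entry:
 *  - debug exceptions (and interrupts taken while in debug mode) save the PC
 *    in DERA and jump to EENTRY + 0x480;
 *  - TLB refill exceptions save state in TLBRPRMD/TLBRERA, switch to direct
 *    address (DA) mode and jump to TLBRENTRY;
 *  - everything else saves state in PRMD/ERA and jumps to
 *    EENTRY + ecode * spacing, where the spacing between vector entries is
 *    (1 << ECFG.VS) * 4 bytes (0 when VS == 0, i.e. a single shared entry);
 *    interrupts use ecode EXCCODE_EXTERNAL_INT + vector.
 */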
static void loongarch_cpu_do_interrupt(CPUState *cs)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    bool update_badinstr = true;
    int cause = -1;
    const char *name;
    bool tlbfill = FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR);
    uint32_t vec_size = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, VS);

    if (cs->exception_index != EXCCODE_INT) {
        if (cs->exception_index < 0 ||
            cs->exception_index >= ARRAY_SIZE(excp_names)) {
            name = "unknown";
        } else {
            name = excp_names[cs->exception_index];
        }

        qemu_log_mask(CPU_LOG_INT,
                      "%s enter: pc " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " TLBRERA " TARGET_FMT_lx " %s exception\n", __func__,
                      env->pc, env->CSR_ERA, env->CSR_TLBRERA, name);
    }

    switch (cs->exception_index) {
    case EXCCODE_DBP:
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DCL, 1);
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, ECODE, 0xC);
        goto set_DERA;
    set_DERA:
        env->CSR_DERA = env->pc;
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DST, 1);
        set_pc(env, env->CSR_EENTRY + 0x480);
        break;
    case EXCCODE_INT:
        if (FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DEI, 1);
            goto set_DERA;
        }
        QEMU_FALLTHROUGH;
    case EXCCODE_PIF:
    case EXCCODE_ADEF:
        cause = cs->exception_index;
        update_badinstr = 0;
        break;
    case EXCCODE_SYS:
    case EXCCODE_BRK:
    case EXCCODE_INE:
    case EXCCODE_IPE:
    case EXCCODE_FPD:
    case EXCCODE_FPE:
    case EXCCODE_SXD:
    case EXCCODE_ASXD:
        env->CSR_BADV = env->pc;
        QEMU_FALLTHROUGH;
    case EXCCODE_BCE:
    case EXCCODE_ADEM:
    case EXCCODE_PIL:
    case EXCCODE_PIS:
    case EXCCODE_PME:
    case EXCCODE_PNR:
    case EXCCODE_PNX:
    case EXCCODE_PPI:
        cause = cs->exception_index;
        break;
    default:
        qemu_log("Error: exception(%d) has not been supported\n",
                 cs->exception_index);
        abort();
    }

    if (update_badinstr) {
        env->CSR_BADI = cpu_ldl_code(env, env->pc);
    }

    /* Save PLV and IE */
    if (tlbfill) {
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PPLV,
                                       FIELD_EX64(env->CSR_CRMD,
                                       CSR_CRMD, PLV));
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PIE,
                                       FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        /* set the DA mode */
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA,
                                      PC, (env->pc >> 2));
    } else {
        env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ECODE,
                                    EXCODE_MCODE(cause));
        env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ESUBCODE,
                                    EXCODE_SUBCODE(cause));
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PPLV,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV));
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PIE,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        env->CSR_ERA = env->pc;
    }

    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);

    if (vec_size) {
        vec_size = (1 << vec_size) * 4;
    }

    if (cs->exception_index == EXCCODE_INT) {
        /* Interrupt */
        uint32_t vector = 0;
        uint32_t pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
        pending &= FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

        /* Find the highest-priority interrupt. */
        vector = 31 - clz32(pending);
        set_pc(env, env->CSR_EENTRY +
               (EXCCODE_EXTERNAL_INT + vector) * vec_size);
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d\n" " A " TARGET_FMT_lx " D "
                      TARGET_FMT_lx " vector = %d ExC " TARGET_FMT_lx " ExS "
                      TARGET_FMT_lx "\n",
                      __func__, env->pc, env->CSR_ERA,
                      cause, env->CSR_BADV, env->CSR_DERA, vector,
                      env->CSR_ECFG, env->CSR_ESTAT);
    } else {
        if (tlbfill) {
            set_pc(env, env->CSR_TLBRENTRY);
        } else {
            set_pc(env, env->CSR_EENTRY + EXCODE_MCODE(cause) * vec_size);
        }
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d%s\n ESTAT " TARGET_FMT_lx
                      " EXCFG " TARGET_FMT_lx " BADVA " TARGET_FMT_lx
                      " BADI " TARGET_FMT_lx " SYS_NUM " TARGET_FMT_lu
                      " cpu %d asid " TARGET_FMT_lx "\n", __func__, env->pc,
                      tlbfill ? env->CSR_TLBRERA : env->CSR_ERA,
                      cause, tlbfill ? "(refill)" : "", env->CSR_ESTAT,
                      env->CSR_ECFG,
                      tlbfill ? env->CSR_TLBRBADV : env->CSR_BADV,
                      env->CSR_BADI, env->gpr[11], cs->cpu_index,
                      env->CSR_ASID);
    }
    cs->exception_index = -1;
}

static void loongarch_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                                vaddr addr, unsigned size,
                                                MMUAccessType access_type,
                                                int mmu_idx, MemTxAttrs attrs,
                                                MemTxResult response,
                                                uintptr_t retaddr)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    if (access_type == MMU_INST_FETCH) {
        do_raise_exception(env, EXCCODE_ADEF, retaddr);
    } else {
        do_raise_exception(env, EXCCODE_ADEM, retaddr);
    }
}

static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        LoongArchCPU *cpu = LOONGARCH_CPU(cs);
        CPULoongArchState *env = &cpu->env;

        if (cpu_loongarch_hw_interrupts_enabled(env) &&
            cpu_loongarch_hw_interrupts_pending(env)) {
            /* Raise it */
            cs->exception_index = EXCCODE_INT;
            loongarch_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
#endif

#ifdef CONFIG_TCG
static void loongarch_cpu_synchronize_from_tb(CPUState *cs,
                                              const TranslationBlock *tb)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
    set_pc(env, tb->pc);
}

static void loongarch_restore_state_to_opc(CPUState *cs,
                                           const TranslationBlock *tb,
                                           const uint64_t *data)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    set_pc(env, data[0]);
}
#endif /* CONFIG_TCG */

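/*
 * A halted vCPU only has work when an unmasked hardware interrupt is
 * pending; user-only emulation always reports work.
 */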
static bool loongarch_cpu_has_work(CPUState *cs)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    bool has_work = false;

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
        cpu_loongarch_hw_interrupts_pending(env)) {
        has_work = true;
    }

    return has_work;
#endif
}

static void loongarch_la464_initfn(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;
    int i;

    for (i = 0; i < 21; i++) {
        env->cpucfg[i] = 0x0;
    }

    cpu->dtb_compatible = "loongarch,Loongson-3A5000";
    env->cpucfg[0] = 0x14c010;  /* PRID */

    uint32_t data = 0;
    data = FIELD_DP32(data, CPUCFG1, ARCH, 2);
    data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
    data = FIELD_DP32(data, CPUCFG1, PALEN, 0x2f); /* 48 bits */
    data = FIELD_DP32(data, CPUCFG1, VALEN, 0x2f); /* 48 bits */
    data = FIELD_DP32(data, CPUCFG1, UAL, 1);
    data = FIELD_DP32(data, CPUCFG1, RI, 1);
    data = FIELD_DP32(data, CPUCFG1, EP, 1);
    data = FIELD_DP32(data, CPUCFG1, RPLV, 1);
    data = FIELD_DP32(data, CPUCFG1, HP, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1);
    env->cpucfg[1] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG2, FP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_SP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_DP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LSX, 1);
    data = FIELD_DP32(data, CPUCFG2, LASX, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LSPW, 1);
    data = FIELD_DP32(data, CPUCFG2, LAM, 1);
    env->cpucfg[2] = data;

    env->cpucfg[4] = 100 * 1000 * 1000; /* Crystal frequency */

    data = 0;
    data = FIELD_DP32(data, CPUCFG5, CC_MUL, 1);
    data = FIELD_DP32(data, CPUCFG5, CC_DIV, 1);
    env->cpucfg[5] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG16, L1_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L1_DPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRIV, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUINCL, 1);
    env->cpucfg[16] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG17, L1IU_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SIZE, 6);
    env->cpucfg[17] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG18, L1D_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG18, L1D_SETS, 8);
    data = FIELD_DP32(data, CPUCFG18, L1D_SIZE, 6);
    env->cpucfg[18] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG19, L2IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SIZE, 6);
    env->cpucfg[19] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG20, L3IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SETS, 14);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SIZE, 6);
    env->cpucfg[20] = data;

    env->CSR_ASID = FIELD_DP64(0, CSR_ASID, ASIDBITS, 0xa);
    loongarch_cpu_post_init(obj);
}

static void loongarch_la132_initfn(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;

    int i;

    for (i = 0; i < 21; i++) {
        env->cpucfg[i] = 0x0;
    }

    cpu->dtb_compatible = "loongarch,Loongson-1C103";
    env->cpucfg[0] = 0x148042;  /* PRID */

    uint32_t data = 0;
    data = FIELD_DP32(data, CPUCFG1, ARCH, 1); /* LA32 */
    data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
    data = FIELD_DP32(data, CPUCFG1, PALEN, 0x1f); /* 32 bits */
    data = FIELD_DP32(data, CPUCFG1, VALEN, 0x1f); /* 32 bits */
    data = FIELD_DP32(data, CPUCFG1, UAL, 1);
    data = FIELD_DP32(data, CPUCFG1, RI, 0);
    data = FIELD_DP32(data, CPUCFG1, EP, 0);
    data = FIELD_DP32(data, CPUCFG1, RPLV, 0);
    data = FIELD_DP32(data, CPUCFG1, HP, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1);
    env->cpucfg[1] = data;
}

static void loongarch_max_initfn(Object *obj)
{
    /* '-cpu max' for TCG: we use cpu la464. */
    loongarch_la464_initfn(obj);
}

static void loongarch_cpu_list_entry(gpointer data, gpointer user_data)
{
    const char *typename = object_class_get_name(OBJECT_CLASS(data));

    qemu_printf("%s\n", typename);
}

void loongarch_cpu_list(void)
{
    GSList *list;
    list = object_class_get_list_sorted(TYPE_LOONGARCH_CPU, false);
    g_slist_foreach(list, loongarch_cpu_list_entry, NULL);
    g_slist_free(list);
}

static void loongarch_cpu_reset_hold(Object *obj)
{
    CPUState *cs = CPU(obj);
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(cpu);
    CPULoongArchState *env = &cpu->env;

    if (lacc->parent_phases.hold) {
        lacc->parent_phases.hold(obj);
    }

    env->fcsr0_mask = FCSR0_M1 | FCSR0_M2 | FCSR0_M3;
    env->fcsr0 = 0x0;

    int n;
    /* Set csr registers value after reset */
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATF, 1);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATM, 1);

    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, FPE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, SXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, ASXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, BTE, 0);

    env->CSR_MISC = 0;

    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, VS, 0);
    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, LIE, 0);

    env->CSR_ESTAT = env->CSR_ESTAT & (~MAKE_64BIT_MASK(0, 2));
    env->CSR_RVACFG = FIELD_DP64(env->CSR_RVACFG, CSR_RVACFG, RBITS, 0);
    env->CSR_TCFG = FIELD_DP64(env->CSR_TCFG, CSR_TCFG, EN, 0);
    env->CSR_LLBCTL = FIELD_DP64(env->CSR_LLBCTL, CSR_LLBCTL, KLO, 0);
    env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 0);
    env->CSR_MERRCTL = FIELD_DP64(env->CSR_MERRCTL, CSR_MERRCTL, ISMERR, 0);

    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, TLB_TYPE, 2);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, MTLB_ENTRY, 63);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_WAYS, 7);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_SETS, 8);

    for (n = 0; n < 4; n++) {
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV0, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV1, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV2, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV3, 0);
    }

#ifndef CONFIG_USER_ONLY
    env->pc = 0x1c000000;
    memset(env->tlb, 0, sizeof(env->tlb));
#endif

    restore_fp_status(env);
    cs->exception_index = -1;
}

static void loongarch_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    info->print_insn = print_insn_loongarch;
}

static void loongarch_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    loongarch_cpu_register_gdb_regs_for_features(cs);

    cpu_reset(cs);
    qemu_init_vcpu(cs);

    lacc->parent_realize(dev, errp);
}

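/*
 * Minimal per-CPU IOCSR emulation: reads of a few identification registers
 * return fixed values (vendor "Loongson", CPU name "3A5000", feature bits),
 * all other registers read as 0 and writes are logged as unimplemented.
 * The region is mapped at offset 0 of the per-CPU IOCSR address space in
 * loongarch_cpu_init() below.
 */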
#ifndef CONFIG_USER_ONLY
static void loongarch_qemu_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    qemu_log_mask(LOG_UNIMP, "[%s]: Unimplemented reg 0x%" HWADDR_PRIx "\n",
                  __func__, addr);
}

static uint64_t loongarch_qemu_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr) {
    case VERSION_REG:
        return 0x11ULL;
    case FEATURE_REG:
        return 1ULL << IOCSRF_MSI | 1ULL << IOCSRF_EXTIOI |
               1ULL << IOCSRF_CSRIPI;
    case VENDOR_REG:
        return 0x6e6f73676e6f6f4cULL; /* "Loongson" */
    case CPUNAME_REG:
        return 0x303030354133ULL;     /* "3A5000" */
    case MISC_FUNC_REG:
        return 1ULL << IOCSRM_EXTIOI_EN;
    }
    return 0ULL;
}

static const MemoryRegionOps loongarch_qemu_ops = {
    .read = loongarch_qemu_read,
    .write = loongarch_qemu_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
#endif

static bool loongarch_get_lsx(Object *obj, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    bool ret;

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
        ret = true;
    } else {
        ret = false;
    }
    return ret;
}

static void loongarch_set_lsx(Object *obj, bool value, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    if (value) {
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 1);
    } else {
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 0);
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 0);
    }
}

static bool loongarch_get_lasx(Object *obj, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    bool ret;

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) {
        ret = true;
    } else {
        ret = false;
    }
    return ret;
}

static void loongarch_set_lasx(Object *obj, bool value, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    if (value) {
        if (!FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
            cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 1);
        }
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 1);
    } else {
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 0);
    }
}

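/*
 * Expose "lsx" and "lasx" as boolean QOM properties when the CPU model sets
 * the corresponding CPUCFG2 bits, so the vector extensions can be toggled
 * per CPU; disabling LSX also clears LASX, and enabling LASX pulls in LSX.
 * A possible command line use (assuming the usual -cpu property syntax)
 * would be: -cpu la464,lasx=off
 */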
void loongarch_cpu_post_init(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
        object_property_add_bool(obj, "lsx", loongarch_get_lsx,
                                 loongarch_set_lsx);
    }
    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) {
        object_property_add_bool(obj, "lasx", loongarch_get_lasx,
                                 loongarch_set_lasx);
    }
}

static void loongarch_cpu_init(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;

    qdev_init_gpio_in(DEVICE(cpu), loongarch_cpu_set_irq, N_IRQS);
    timer_init_ns(&cpu->timer, QEMU_CLOCK_VIRTUAL,
                  &loongarch_constant_timer_cb, cpu);
    memory_region_init_io(&env->system_iocsr, OBJECT(cpu), NULL,
                          env, "iocsr", UINT64_MAX);
    address_space_init(&env->address_space_iocsr, &env->system_iocsr, "IOCSR");
    memory_region_init_io(&env->iocsr_mem, OBJECT(cpu), &loongarch_qemu_ops,
                          NULL, "iocsr_misc", 0x428);
    memory_region_add_subregion(&env->system_iocsr, 0, &env->iocsr_mem);
#endif
}

static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;

    oc = object_class_by_name(cpu_model);
    if (!oc) {
        g_autofree char *typename
            = g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model);
        oc = object_class_by_name(typename);
        if (!oc) {
            return NULL;
        }
    }

    if (object_class_dynamic_cast(oc, TYPE_LOONGARCH_CPU)
        && !object_class_is_abstract(oc)) {
        return oc;
    }
    return NULL;
}

void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    int i;

    qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
    qemu_fprintf(f, " FCSR0 0x%08x fp_status 0x%02x\n", env->fcsr0,
                 get_float_exception_flags(&env->fp_status));

    /* gpr */
    for (i = 0; i < 32; i++) {
        if ((i & 3) == 0) {
            qemu_fprintf(f, " GPR%02d:", i);
        }
        qemu_fprintf(f, " %s %016" PRIx64, regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }

    qemu_fprintf(f, "CRMD=%016" PRIx64 "\n", env->CSR_CRMD);
    qemu_fprintf(f, "PRMD=%016" PRIx64 "\n", env->CSR_PRMD);
    qemu_fprintf(f, "EUEN=%016" PRIx64 "\n", env->CSR_EUEN);
    qemu_fprintf(f, "ESTAT=%016" PRIx64 "\n", env->CSR_ESTAT);
    qemu_fprintf(f, "ERA=%016" PRIx64 "\n", env->CSR_ERA);
    qemu_fprintf(f, "BADV=%016" PRIx64 "\n", env->CSR_BADV);
    qemu_fprintf(f, "BADI=%016" PRIx64 "\n", env->CSR_BADI);
    qemu_fprintf(f, "EENTRY=%016" PRIx64 "\n", env->CSR_EENTRY);
    qemu_fprintf(f, "PRCFG1=%016" PRIx64 ", PRCFG2=%016" PRIx64 ","
                 " PRCFG3=%016" PRIx64 "\n",
                 env->CSR_PRCFG1, env->CSR_PRCFG2, env->CSR_PRCFG3);
    qemu_fprintf(f, "TLBRENTRY=%016" PRIx64 "\n", env->CSR_TLBRENTRY);
    qemu_fprintf(f, "TLBRBADV=%016" PRIx64 "\n", env->CSR_TLBRBADV);
    qemu_fprintf(f, "TLBRERA=%016" PRIx64 "\n", env->CSR_TLBRERA);

    /* fpr */
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %s %016" PRIx64, fregnames[i],
                         env->fpr[i].vreg.D(0));
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
}

#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"

static struct TCGCPUOps loongarch_tcg_ops = {
    .initialize = loongarch_translate_init,
    .synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
    .restore_state_to_opc = loongarch_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = loongarch_cpu_tlb_fill,
    .cpu_exec_interrupt = loongarch_cpu_exec_interrupt,
    .do_interrupt = loongarch_cpu_do_interrupt,
    .do_transaction_failed = loongarch_cpu_do_transaction_failed,
#endif
};
#endif /* CONFIG_TCG */

#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h" 797 798 static const struct SysemuCPUOps loongarch_sysemu_ops = { 799 .get_phys_page_debug = loongarch_cpu_get_phys_page_debug, 800 }; 801 802 static int64_t loongarch_cpu_get_arch_id(CPUState *cs) 803 { 804 LoongArchCPU *cpu = LOONGARCH_CPU(cs); 805 806 return cpu->phy_id; 807 } 808 #endif 809 810 static void loongarch_cpu_class_init(ObjectClass *c, void *data) 811 { 812 LoongArchCPUClass *lacc = LOONGARCH_CPU_CLASS(c); 813 CPUClass *cc = CPU_CLASS(c); 814 DeviceClass *dc = DEVICE_CLASS(c); 815 ResettableClass *rc = RESETTABLE_CLASS(c); 816 817 device_class_set_parent_realize(dc, loongarch_cpu_realizefn, 818 &lacc->parent_realize); 819 resettable_class_set_parent_phases(rc, NULL, loongarch_cpu_reset_hold, NULL, 820 &lacc->parent_phases); 821 822 cc->class_by_name = loongarch_cpu_class_by_name; 823 cc->has_work = loongarch_cpu_has_work; 824 cc->dump_state = loongarch_cpu_dump_state; 825 cc->set_pc = loongarch_cpu_set_pc; 826 cc->get_pc = loongarch_cpu_get_pc; 827 #ifndef CONFIG_USER_ONLY 828 cc->get_arch_id = loongarch_cpu_get_arch_id; 829 dc->vmsd = &vmstate_loongarch_cpu; 830 cc->sysemu_ops = &loongarch_sysemu_ops; 831 #endif 832 cc->disas_set_info = loongarch_cpu_disas_set_info; 833 cc->gdb_read_register = loongarch_cpu_gdb_read_register; 834 cc->gdb_write_register = loongarch_cpu_gdb_write_register; 835 cc->gdb_stop_before_watchpoint = true; 836 837 #ifdef CONFIG_TCG 838 cc->tcg_ops = &loongarch_tcg_ops; 839 #endif 840 } 841 842 static const gchar *loongarch32_gdb_arch_name(CPUState *cs) 843 { 844 return "loongarch32"; 845 } 846 847 static void loongarch32_cpu_class_init(ObjectClass *c, void *data) 848 { 849 CPUClass *cc = CPU_CLASS(c); 850 851 cc->gdb_num_core_regs = 35; 852 cc->gdb_core_xml_file = "loongarch-base32.xml"; 853 cc->gdb_arch_name = loongarch32_gdb_arch_name; 854 } 855 856 static const gchar *loongarch64_gdb_arch_name(CPUState *cs) 857 { 858 return "loongarch64"; 859 } 860 861 static void loongarch64_cpu_class_init(ObjectClass *c, void *data) 862 { 863 CPUClass *cc = CPU_CLASS(c); 864 865 cc->gdb_num_core_regs = 35; 866 cc->gdb_core_xml_file = "loongarch-base64.xml"; 867 cc->gdb_arch_name = loongarch64_gdb_arch_name; 868 } 869 870 #define DEFINE_LOONGARCH_CPU_TYPE(size, model, initfn) \ 871 { \ 872 .parent = TYPE_LOONGARCH##size##_CPU, \ 873 .instance_init = initfn, \ 874 .name = LOONGARCH_CPU_TYPE_NAME(model), \ 875 } 876 877 static const TypeInfo loongarch_cpu_type_infos[] = { 878 { 879 .name = TYPE_LOONGARCH_CPU, 880 .parent = TYPE_CPU, 881 .instance_size = sizeof(LoongArchCPU), 882 .instance_align = __alignof(LoongArchCPU), 883 .instance_init = loongarch_cpu_init, 884 885 .abstract = true, 886 .class_size = sizeof(LoongArchCPUClass), 887 .class_init = loongarch_cpu_class_init, 888 }, 889 { 890 .name = TYPE_LOONGARCH32_CPU, 891 .parent = TYPE_LOONGARCH_CPU, 892 893 .abstract = true, 894 .class_init = loongarch32_cpu_class_init, 895 }, 896 { 897 .name = TYPE_LOONGARCH64_CPU, 898 .parent = TYPE_LOONGARCH_CPU, 899 900 .abstract = true, 901 .class_init = loongarch64_cpu_class_init, 902 }, 903 DEFINE_LOONGARCH_CPU_TYPE(64, "la464", loongarch_la464_initfn), 904 DEFINE_LOONGARCH_CPU_TYPE(32, "la132", loongarch_la132_initfn), 905 DEFINE_LOONGARCH_CPU_TYPE(64, "max", loongarch_max_initfn), 906 }; 907 908 DEFINE_TYPES(loongarch_cpu_type_infos) 909