/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch CPU
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "sysemu/qtest.h"
#include "exec/exec-all.h"
#include "qapi/qapi-commands-machine-target.h"
#include "cpu.h"
#include "internals.h"
#include "fpu/softfloat-helpers.h"
#include "cpu-csr.h"
#include "sysemu/reset.h"

const char * const regnames[32] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

const char * const fregnames[32] = {
    "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
    "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
    "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
    "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
};

static const char * const excp_names[] = {
    [EXCCODE_INT] = "Interrupt",
    [EXCCODE_PIL] = "Page invalid exception for load",
    [EXCCODE_PIS] = "Page invalid exception for store",
    [EXCCODE_PIF] = "Page invalid exception for fetch",
    [EXCCODE_PME] = "Page modified exception",
    [EXCCODE_PNR] = "Page Not Readable exception",
    [EXCCODE_PNX] = "Page Not Executable exception",
    [EXCCODE_PPI] = "Page Privilege error",
    [EXCCODE_ADEF] = "Address error for instruction fetch",
    [EXCCODE_ADEM] = "Address error for Memory access",
    [EXCCODE_SYS] = "Syscall",
    [EXCCODE_BRK] = "Break",
    [EXCCODE_INE] = "Instruction Non-Existent",
    [EXCCODE_IPE] = "Instruction privilege error",
    [EXCCODE_FPE] = "Floating Point Exception",
    [EXCCODE_DBP] = "Debug breakpoint",
    [EXCCODE_BCE] = "Bound Check Exception",
};

const char *loongarch_exception_name(int32_t exception)
{
    assert(excp_names[exception]);
    return excp_names[exception];
}

void G_NORETURN do_raise_exception(CPULoongArchState *env,
                                   uint32_t exception,
                                   uintptr_t pc)
{
    CPUState *cs = env_cpu(env);

    qemu_log_mask(CPU_LOG_INT, "%s: %d (%s)\n",
                  __func__,
                  exception,
                  loongarch_exception_name(exception));
    cs->exception_index = exception;

    cpu_loop_exit_restore(cs, pc);
}

static void loongarch_cpu_set_pc(CPUState *cs, vaddr value)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    env->pc = value;
}

#ifndef CONFIG_USER_ONLY
#include "hw/loongarch/virt.h"

void loongarch_cpu_set_irq(void *opaque, int irq, int level)
{
    LoongArchCPU *cpu = opaque;
    CPULoongArchState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    if (irq < 0 || irq >= N_IRQS) {
        return;
    }

    env->CSR_ESTAT = deposit64(env->CSR_ESTAT, irq, 1, level != 0);

    if (FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS)) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }
}

static inline bool cpu_loongarch_hw_interrupts_enabled(CPULoongArchState *env)
{
    bool ret = 0;

    ret = (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE) &&
           !(FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)));

    return ret;
}

/* Check if there is pending and not masked out interrupt */
static inline bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env)
{
    uint32_t pending;
    uint32_t status;
    bool r;

    pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
    status = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

    r = (pending & status) != 0;
    return r;
}

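/*
 * Exception and interrupt entry: record the return address and the
 * interrupted context in the CSRs (ERA/PRMD, or TLBRERA/TLBRPRMD for a
 * TLB refill), drop to PLV0 with interrupts disabled, then jump to the
 * handler entry selected by the exception code.
 */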
static void loongarch_cpu_do_interrupt(CPUState *cs)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    bool update_badinstr = 1;
    int cause = -1;
    const char *name;
    bool tlbfill = FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR);
    uint32_t vec_size = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, VS);

    if (cs->exception_index != EXCCODE_INT) {
        if (cs->exception_index < 0 ||
            cs->exception_index >= ARRAY_SIZE(excp_names)) {
            name = "unknown";
        } else {
            name = excp_names[cs->exception_index];
        }

        qemu_log_mask(CPU_LOG_INT,
                      "%s enter: pc " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " TLBRERA " TARGET_FMT_lx " %s exception\n", __func__,
                      env->pc, env->CSR_ERA, env->CSR_TLBRERA, name);
    }

    switch (cs->exception_index) {
    case EXCCODE_DBP:
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DCL, 1);
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, ECODE, 0xC);
        goto set_DERA;
    set_DERA:
        env->CSR_DERA = env->pc;
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DST, 1);
        env->pc = env->CSR_EENTRY + 0x480;
        break;
    case EXCCODE_INT:
        if (FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DEI, 1);
            goto set_DERA;
        }
        QEMU_FALLTHROUGH;
    case EXCCODE_PIF:
        cause = cs->exception_index;
        update_badinstr = 0;
        break;
    case EXCCODE_SYS:
    case EXCCODE_BRK:
    case EXCCODE_INE:
    case EXCCODE_IPE:
    case EXCCODE_FPE:
    case EXCCODE_BCE:
        env->CSR_BADV = env->pc;
        QEMU_FALLTHROUGH;
    case EXCCODE_ADEM:
    case EXCCODE_PIL:
    case EXCCODE_PIS:
    case EXCCODE_PME:
    case EXCCODE_PNR:
    case EXCCODE_PNX:
    case EXCCODE_PPI:
        cause = cs->exception_index;
        break;
    default:
        qemu_log("Error: exception(%d) '%s' has not been supported\n",
                 cs->exception_index, excp_names[cs->exception_index]);
        abort();
    }

    if (update_badinstr) {
        env->CSR_BADI = cpu_ldl_code(env, env->pc);
    }

    /* Save PLV and IE */
    if (tlbfill) {
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PPLV,
                                       FIELD_EX64(env->CSR_CRMD,
                                       CSR_CRMD, PLV));
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PIE,
                                       FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        /* set the DA mode */
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA,
                                      PC, (env->pc >> 2));
    } else {
        env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ECODE, cause);
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PPLV,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV));
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PIE,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        env->CSR_ERA = env->pc;
    }

    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);

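    /*
     * Pick the handler entry: interrupts use the vector slot for
     * EXCCODE_EXTERNAL_INT plus the number of the highest pending,
     * enabled interrupt line; other exceptions use the slot for their
     * exception code, except TLB refills, which jump to CSR_TLBRENTRY.
     */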
    if (cs->exception_index == EXCCODE_INT) {
        /* Interrupt */
        uint32_t vector = 0;
        uint32_t pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
        pending &= FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

        /* Find the highest-priority interrupt. */
        vector = 31 - clz32(pending);
        env->pc = env->CSR_EENTRY + (EXCCODE_EXTERNAL_INT + vector) * vec_size;
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d\n" " A " TARGET_FMT_lx " D "
                      TARGET_FMT_lx " vector = %d ExC " TARGET_FMT_lx "ExS"
                      TARGET_FMT_lx "\n",
                      __func__, env->pc, env->CSR_ERA,
                      cause, env->CSR_BADV, env->CSR_DERA, vector,
                      env->CSR_ECFG, env->CSR_ESTAT);
    } else {
        if (tlbfill) {
            env->pc = env->CSR_TLBRENTRY;
        } else {
            env->pc = env->CSR_EENTRY;
            env->pc += cause * vec_size;
        }
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d%s\n, ESTAT " TARGET_FMT_lx
                      " EXCFG " TARGET_FMT_lx " BADVA " TARGET_FMT_lx
                      "BADI " TARGET_FMT_lx " SYS_NUM " TARGET_FMT_lu
                      " cpu %d asid " TARGET_FMT_lx "\n", __func__, env->pc,
                      tlbfill ? env->CSR_TLBRERA : env->CSR_ERA,
                      cause, tlbfill ? "(refill)" : "", env->CSR_ESTAT,
                      env->CSR_ECFG,
                      tlbfill ? env->CSR_TLBRBADV : env->CSR_BADV,
                      env->CSR_BADI, env->gpr[11], cs->cpu_index,
                      env->CSR_ASID);
    }
    cs->exception_index = -1;
}

static void loongarch_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                                vaddr addr, unsigned size,
                                                MMUAccessType access_type,
                                                int mmu_idx, MemTxAttrs attrs,
                                                MemTxResult response,
                                                uintptr_t retaddr)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    if (access_type == MMU_INST_FETCH) {
        do_raise_exception(env, EXCCODE_ADEF, retaddr);
    } else {
        do_raise_exception(env, EXCCODE_ADEM, retaddr);
    }
}

static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        LoongArchCPU *cpu = LOONGARCH_CPU(cs);
        CPULoongArchState *env = &cpu->env;

        if (cpu_loongarch_hw_interrupts_enabled(env) &&
            cpu_loongarch_hw_interrupts_pending(env)) {
            /* Raise it */
            cs->exception_index = EXCCODE_INT;
            loongarch_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
#endif

#ifdef CONFIG_TCG
static void loongarch_cpu_synchronize_from_tb(CPUState *cs,
                                              const TranslationBlock *tb)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    env->pc = tb->pc;
}
#endif /* CONFIG_TCG */

static bool loongarch_cpu_has_work(CPUState *cs)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    bool has_work = false;

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
        cpu_loongarch_hw_interrupts_pending(env)) {
        has_work = true;
    }

    return has_work;
#endif
}

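/*
 * LA464 is the core used in the Loongson 3A5000: fill in the read-only
 * CPUCFG words describing its ISA level, FPU, clock and cache hierarchy.
 */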
static void loongarch_la464_initfn(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;
    int i;

    for (i = 0; i < 21; i++) {
        env->cpucfg[i] = 0x0;
    }

    env->cpucfg[0] = 0x14c010;  /* PRID */

    uint32_t data = 0;
    data = FIELD_DP32(data, CPUCFG1, ARCH, 2);
    data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
    data = FIELD_DP32(data, CPUCFG1, PALEN, 0x2f);
    data = FIELD_DP32(data, CPUCFG1, VALEN, 0x2f);
    data = FIELD_DP32(data, CPUCFG1, UAL, 1);
    data = FIELD_DP32(data, CPUCFG1, RI, 1);
    data = FIELD_DP32(data, CPUCFG1, EP, 1);
    data = FIELD_DP32(data, CPUCFG1, RPLV, 1);
    data = FIELD_DP32(data, CPUCFG1, HP, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1);
    env->cpucfg[1] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG2, FP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_SP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_DP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LAM, 1);
    env->cpucfg[2] = data;

    env->cpucfg[4] = 100 * 1000 * 1000; /* Crystal frequency */

    data = 0;
    data = FIELD_DP32(data, CPUCFG5, CC_MUL, 1);
    data = FIELD_DP32(data, CPUCFG5, CC_DIV, 1);
    env->cpucfg[5] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG16, L1_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L1_DPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRIV, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUINCL, 1);
    env->cpucfg[16] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG17, L1IU_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SIZE, 6);
    env->cpucfg[17] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG18, L1D_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG18, L1D_SETS, 8);
    data = FIELD_DP32(data, CPUCFG18, L1D_SIZE, 6);
    env->cpucfg[18] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG19, L2IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SIZE, 6);
    env->cpucfg[19] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG20, L3IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SETS, 14);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SIZE, 6);
    env->cpucfg[20] = data;

    env->CSR_ASID = FIELD_DP64(0, CSR_ASID, ASIDBITS, 0xa);
}

static void loongarch_cpu_list_entry(gpointer data, gpointer user_data)
{
    const char *typename = object_class_get_name(OBJECT_CLASS(data));

    qemu_printf("%s\n", typename);
}

void loongarch_cpu_list(void)
{
    GSList *list;
    list = object_class_get_list_sorted(TYPE_LOONGARCH_CPU, false);
    g_slist_foreach(list, loongarch_cpu_list_entry, NULL);
    g_slist_free(list);
}

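/*
 * Architectural reset: return to PLV0 in direct-address (non-paged)
 * mode with interrupts and all extended units disabled, clear the FPU
 * state, and restart fetching from the boot address.
 */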
static void loongarch_cpu_reset(DeviceState *dev)
{
    CPUState *cs = CPU(dev);
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(cpu);
    CPULoongArchState *env = &cpu->env;

    lacc->parent_reset(dev);

    env->fcsr0_mask = FCSR0_M1 | FCSR0_M2 | FCSR0_M3;
    env->fcsr0 = 0x0;

    int n;
    /* Set csr registers value after reset */
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATF, 1);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATM, 1);

    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, FPE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, SXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, ASXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, BTE, 0);

    env->CSR_MISC = 0;

    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, VS, 0);
    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, LIE, 0);

    env->CSR_ESTAT = env->CSR_ESTAT & (~MAKE_64BIT_MASK(0, 2));
    env->CSR_RVACFG = FIELD_DP64(env->CSR_RVACFG, CSR_RVACFG, RBITS, 0);
    env->CSR_TCFG = FIELD_DP64(env->CSR_TCFG, CSR_TCFG, EN, 0);
    env->CSR_LLBCTL = FIELD_DP64(env->CSR_LLBCTL, CSR_LLBCTL, KLO, 0);
    env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 0);
    env->CSR_MERRCTL = FIELD_DP64(env->CSR_MERRCTL, CSR_MERRCTL, ISMERR, 0);

    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, TLB_TYPE, 2);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, MTLB_ENTRY, 63);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_WAYS, 7);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_SETS, 8);

    for (n = 0; n < 4; n++) {
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV0, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV1, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV2, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV3, 0);
    }

#ifndef CONFIG_USER_ONLY
    env->pc = 0x1c000000;
#endif

    restore_fp_status(env);
    cs->exception_index = -1;
}

static void loongarch_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    info->print_insn = print_insn_loongarch;
}

static void loongarch_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    loongarch_cpu_register_gdb_regs_for_features(cs);

    cpu_reset(cs);
    qemu_init_vcpu(cs);

    lacc->parent_realize(dev, errp);
}

#ifndef CONFIG_USER_ONLY
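/*
 * IOCSR registers emulated directly by QEMU: reads expose the feature
 * flags, the vendor string ("Loongson"), the CPU name ("3A5000") and
 * the extioi enable bit; writes are ignored.
 */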
static void loongarch_qemu_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}

static uint64_t loongarch_qemu_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr) {
    case FEATURE_REG:
        return 1ULL << IOCSRF_MSI | 1ULL << IOCSRF_EXTIOI |
               1ULL << IOCSRF_CSRIPI;
    case VENDOR_REG:
        return 0x6e6f73676e6f6f4cULL; /* "Loongson" */
    case CPUNAME_REG:
        return 0x303030354133ULL;     /* "3A5000" */
    case MISC_FUNC_REG:
        return 1ULL << IOCSRM_EXTIOI_EN;
    }
    return 0ULL;
}

static const MemoryRegionOps loongarch_qemu_ops = {
    .read = loongarch_qemu_read,
    .write = loongarch_qemu_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
#endif

static void loongarch_cpu_init(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    cpu_set_cpustate_pointers(cpu);

#ifndef CONFIG_USER_ONLY
    CPULoongArchState *env = &cpu->env;
    qdev_init_gpio_in(DEVICE(cpu), loongarch_cpu_set_irq, N_IRQS);
    timer_init_ns(&cpu->timer, QEMU_CLOCK_VIRTUAL,
                  &loongarch_constant_timer_cb, cpu);
    memory_region_init_io(&env->system_iocsr, OBJECT(cpu), NULL,
                          env, "iocsr", UINT64_MAX);
    address_space_init(&env->address_space_iocsr, &env->system_iocsr, "IOCSR");
    memory_region_init_io(&env->iocsr_mem, OBJECT(cpu), &loongarch_qemu_ops,
                          NULL, "iocsr_misc", 0x428);
    memory_region_add_subregion(&env->system_iocsr, 0, &env->iocsr_mem);
#endif
}

static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;

    typename = g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model);
    oc = object_class_by_name(typename);
    g_free(typename);
    return oc;
}

void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;
    int i;

    qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
    qemu_fprintf(f, " FCSR0 0x%08x fp_status 0x%02x\n", env->fcsr0,
                 get_float_exception_flags(&env->fp_status));

    /* gpr */
    for (i = 0; i < 32; i++) {
        if ((i & 3) == 0) {
            qemu_fprintf(f, " GPR%02d:", i);
        }
        qemu_fprintf(f, " %s %016" PRIx64, regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }

    qemu_fprintf(f, "CRMD=%016" PRIx64 "\n", env->CSR_CRMD);
    qemu_fprintf(f, "PRMD=%016" PRIx64 "\n", env->CSR_PRMD);
    qemu_fprintf(f, "EUEN=%016" PRIx64 "\n", env->CSR_EUEN);
    qemu_fprintf(f, "ESTAT=%016" PRIx64 "\n", env->CSR_ESTAT);
    qemu_fprintf(f, "ERA=%016" PRIx64 "\n", env->CSR_ERA);
    qemu_fprintf(f, "BADV=%016" PRIx64 "\n", env->CSR_BADV);
    qemu_fprintf(f, "BADI=%016" PRIx64 "\n", env->CSR_BADI);
    qemu_fprintf(f, "EENTRY=%016" PRIx64 "\n", env->CSR_EENTRY);
    qemu_fprintf(f, "PRCFG1=%016" PRIx64 ", PRCFG2=%016" PRIx64 ","
                 " PRCFG3=%016" PRIx64 "\n",
                 env->CSR_PRCFG1, env->CSR_PRCFG2, env->CSR_PRCFG3);
    qemu_fprintf(f, "TLBRENTRY=%016" PRIx64 "\n", env->CSR_TLBRENTRY);
    qemu_fprintf(f, "TLBRBADV=%016" PRIx64 "\n", env->CSR_TLBRBADV);
    qemu_fprintf(f, "TLBRERA=%016" PRIx64 "\n", env->CSR_TLBRERA);

    /* fpr */
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %s %016" PRIx64, fregnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
}

#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"

static struct TCGCPUOps loongarch_tcg_ops = {
    .initialize = loongarch_translate_init,
    .synchronize_from_tb = loongarch_cpu_synchronize_from_tb,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = loongarch_cpu_tlb_fill,
    .cpu_exec_interrupt = loongarch_cpu_exec_interrupt,
    .do_interrupt = loongarch_cpu_do_interrupt,
    .do_transaction_failed = loongarch_cpu_do_transaction_failed,
#endif
};
#endif /* CONFIG_TCG */

#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps loongarch_sysemu_ops = {
    .get_phys_page_debug = loongarch_cpu_get_phys_page_debug,
};
#endif

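/*
 * Hook the LoongArch implementation into the common CPU, device, GDB
 * stub and TCG interfaces.
 */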
static void loongarch_cpu_class_init(ObjectClass *c, void *data)
{
    LoongArchCPUClass *lacc = LOONGARCH_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);

    device_class_set_parent_realize(dc, loongarch_cpu_realizefn,
                                    &lacc->parent_realize);
    device_class_set_parent_reset(dc, loongarch_cpu_reset, &lacc->parent_reset);

    cc->class_by_name = loongarch_cpu_class_by_name;
    cc->has_work = loongarch_cpu_has_work;
    cc->dump_state = loongarch_cpu_dump_state;
    cc->set_pc = loongarch_cpu_set_pc;
#ifndef CONFIG_USER_ONLY
    dc->vmsd = &vmstate_loongarch_cpu;
    cc->sysemu_ops = &loongarch_sysemu_ops;
#endif
    cc->disas_set_info = loongarch_cpu_disas_set_info;
    cc->gdb_read_register = loongarch_cpu_gdb_read_register;
    cc->gdb_write_register = loongarch_cpu_gdb_write_register;
    cc->gdb_num_core_regs = 34;
    cc->gdb_core_xml_file = "loongarch-base64.xml";
    cc->gdb_stop_before_watchpoint = true;

#ifdef CONFIG_TCG
    cc->tcg_ops = &loongarch_tcg_ops;
#endif
}

#define DEFINE_LOONGARCH_CPU_TYPE(model, initfn) \
    { \
        .parent = TYPE_LOONGARCH_CPU, \
        .instance_init = initfn, \
        .name = LOONGARCH_CPU_TYPE_NAME(model), \
    }

static const TypeInfo loongarch_cpu_type_infos[] = {
    {
        .name = TYPE_LOONGARCH_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(LoongArchCPU),
        .instance_init = loongarch_cpu_init,

        .abstract = true,
        .class_size = sizeof(LoongArchCPUClass),
        .class_init = loongarch_cpu_class_init,
    },
    DEFINE_LOONGARCH_CPU_TYPE("la464", loongarch_la464_initfn),
};

DEFINE_TYPES(loongarch_cpu_type_infos)

static void loongarch_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfo *info = g_new0(CpuDefinitionInfo, 1);
    const char *typename = object_class_get_name(oc);

    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_LOONGARCH_CPU));
    info->q_typename = g_strdup(typename);

    QAPI_LIST_PREPEND(*cpu_list, info);
}

CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_LOONGARCH_CPU, false);
    g_slist_foreach(list, loongarch_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}