/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch CPU
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "sysemu/qtest.h"
#include "sysemu/tcg.h"
#include "sysemu/kvm.h"
#include "kvm/kvm_loongarch.h"
#include "exec/exec-all.h"
#include "cpu.h"
#include "internals.h"
#include "fpu/softfloat-helpers.h"
#include "cpu-csr.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/reset.h"
#endif
#include "vec.h"
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#include "tcg/tcg.h"
#endif

const char * const regnames[32] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

const char * const fregnames[32] = {
    "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
    "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
    "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
    "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
};

struct TypeExcp {
    int32_t exccode;
    const char * const name;
};

static const struct TypeExcp excp_names[] = {
    {EXCCODE_INT, "Interrupt"},
    {EXCCODE_PIL, "Page invalid exception for load"},
    {EXCCODE_PIS, "Page invalid exception for store"},
    {EXCCODE_PIF, "Page invalid exception for fetch"},
    {EXCCODE_PME, "Page modified exception"},
    {EXCCODE_PNR, "Page Not Readable exception"},
    {EXCCODE_PNX, "Page Not Executable exception"},
    {EXCCODE_PPI, "Page Privilege error"},
    {EXCCODE_ADEF, "Address error for instruction fetch"},
    {EXCCODE_ADEM, "Address error for Memory access"},
    {EXCCODE_SYS, "Syscall"},
    {EXCCODE_BRK, "Break"},
    {EXCCODE_INE, "Instruction Non-Existent"},
    {EXCCODE_IPE, "Instruction privilege error"},
    {EXCCODE_FPD, "Floating Point Disabled"},
    {EXCCODE_FPE, "Floating Point Exception"},
    {EXCCODE_DBP, "Debug breakpoint"},
    {EXCCODE_BCE, "Bound Check Exception"},
    {EXCCODE_SXD, "128 bit vector instructions Disable exception"},
    {EXCCODE_ASXD, "256 bit vector instructions Disable exception"},
    {EXCP_HLT, "EXCP_HLT"},
};

const char *loongarch_exception_name(int32_t exception)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(excp_names); i++) {
        if (excp_names[i].exccode == exception) {
            return excp_names[i].name;
        }
    }
    return "Unknown";
}

void G_NORETURN do_raise_exception(CPULoongArchState *env,
                                   uint32_t exception,
                                   uintptr_t pc)
{
    CPUState *cs = env_cpu(env);

    qemu_log_mask(CPU_LOG_INT, "%s: exception: %d (%s)\n",
                  __func__,
                  exception,
                  loongarch_exception_name(exception));
    cs->exception_index = exception;

    cpu_loop_exit_restore(cs, pc);
}

static void loongarch_cpu_set_pc(CPUState *cs, vaddr value)
{
    set_pc(cpu_env(cs), value);
}

static vaddr loongarch_cpu_get_pc(CPUState *cs)
{
    return cpu_env(cs)->pc;
}

#ifndef CONFIG_USER_ONLY
#include "hw/loongarch/virt.h"

void loongarch_cpu_set_irq(void *opaque, int irq, int level)
{
    LoongArchCPU *cpu = opaque;
    CPULoongArchState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    if (irq < 0 || irq >= N_IRQS) {
        return;
    }

    if (kvm_enabled()) {
        kvm_loongarch_set_interrupt(cpu, irq, level);
    } else if (tcg_enabled()) {
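        /*
         * For TCG, mirror the line level into the matching bit of ESTAT.IS,
         * then raise or clear CPU_INTERRUPT_HARD depending on whether any
         * interrupt source remains asserted.
         */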
        env->CSR_ESTAT = deposit64(env->CSR_ESTAT, irq, 1, level != 0);
        if (FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS)) {
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
}

static inline bool cpu_loongarch_hw_interrupts_enabled(CPULoongArchState *env)
{
    bool ret = 0;

    ret = (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE) &&
           !(FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)));

    return ret;
}

/* Check whether there is a pending interrupt that is not masked out */
static inline bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env)
{
    uint32_t pending;
    uint32_t status;

    pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
    status = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

    return (pending & status) != 0;
}
#endif

#ifdef CONFIG_TCG
#ifndef CONFIG_USER_ONLY
static void loongarch_cpu_do_interrupt(CPUState *cs)
{
    CPULoongArchState *env = cpu_env(cs);
    bool update_badinstr = 1;
    int cause = -1;
    bool tlbfill = FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR);
    uint32_t vec_size = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, VS);

    if (cs->exception_index != EXCCODE_INT) {
        qemu_log_mask(CPU_LOG_INT,
                      "%s enter: pc " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " TLBRERA " TARGET_FMT_lx " exception: %d (%s)\n",
                      __func__, env->pc, env->CSR_ERA, env->CSR_TLBRERA,
                      cs->exception_index,
                      loongarch_exception_name(cs->exception_index));
    }

    switch (cs->exception_index) {
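    /*
     * Debug breakpoints, and interrupts taken while already in debug mode,
     * save the return address to DERA, set DBG.DST and enter the handler
     * at EENTRY + 0x480 instead of going through ERA/PRMD.
     */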
    case EXCCODE_DBP:
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DCL, 1);
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, ECODE, 0xC);
        goto set_DERA;
    set_DERA:
        env->CSR_DERA = env->pc;
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DST, 1);
        set_pc(env, env->CSR_EENTRY + 0x480);
        break;
    case EXCCODE_INT:
        if (FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DEI, 1);
            goto set_DERA;
        }
        QEMU_FALLTHROUGH;
    case EXCCODE_PIF:
    case EXCCODE_ADEF:
        cause = cs->exception_index;
        update_badinstr = 0;
        break;
    case EXCCODE_SYS:
    case EXCCODE_BRK:
    case EXCCODE_INE:
    case EXCCODE_IPE:
    case EXCCODE_FPD:
    case EXCCODE_FPE:
    case EXCCODE_SXD:
    case EXCCODE_ASXD:
        env->CSR_BADV = env->pc;
        QEMU_FALLTHROUGH;
    case EXCCODE_BCE:
    case EXCCODE_ADEM:
    case EXCCODE_PIL:
    case EXCCODE_PIS:
    case EXCCODE_PME:
    case EXCCODE_PNR:
    case EXCCODE_PNX:
    case EXCCODE_PPI:
        cause = cs->exception_index;
        break;
    default:
        qemu_log("Error: exception(%d) is not supported\n",
                 cs->exception_index);
        abort();
    }

    if (update_badinstr) {
        env->CSR_BADI = cpu_ldl_code(env, env->pc);
    }

    /* Save PLV and IE */
    if (tlbfill) {
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PPLV,
                                       FIELD_EX64(env->CSR_CRMD,
                                       CSR_CRMD, PLV));
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PIE,
                                       FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        /* set the DA mode */
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA,
                                      PC, (env->pc >> 2));
    } else {
        env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ECODE,
                                    EXCODE_MCODE(cause));
        env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ESUBCODE,
                                    EXCODE_SUBCODE(cause));
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PPLV,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV));
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PIE,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        env->CSR_ERA = env->pc;
    }

    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);

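    /*
     * CSR.ECFG.VS selects the spacing between exception vector entries:
     * VS == 0 means every exception shares the single entry at EENTRY,
     * otherwise entries are 2^VS instructions (4 * 2^VS bytes) apart.
     */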
    if (vec_size) {
        vec_size = (1 << vec_size) * 4;
    }

    if (cs->exception_index == EXCCODE_INT) {
        /* Interrupt */
        uint32_t vector = 0;
        uint32_t pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
        pending &= FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

        /* Find the highest-priority interrupt. */
        vector = 31 - clz32(pending);
        set_pc(env, env->CSR_EENTRY + \
               (EXCCODE_EXTERNAL_INT + vector) * vec_size);
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d\n" " A " TARGET_FMT_lx " D "
                      TARGET_FMT_lx " vector = %d ExC " TARGET_FMT_lx "ExS"
                      TARGET_FMT_lx "\n",
                      __func__, env->pc, env->CSR_ERA,
                      cause, env->CSR_BADV, env->CSR_DERA, vector,
                      env->CSR_ECFG, env->CSR_ESTAT);
    } else {
        if (tlbfill) {
            set_pc(env, env->CSR_TLBRENTRY);
        } else {
            set_pc(env, env->CSR_EENTRY + EXCODE_MCODE(cause) * vec_size);
        }
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d%s\n, ESTAT " TARGET_FMT_lx
                      " EXCFG " TARGET_FMT_lx " BADVA " TARGET_FMT_lx
                      "BADI " TARGET_FMT_lx " SYS_NUM " TARGET_FMT_lu
                      " cpu %d asid " TARGET_FMT_lx "\n", __func__, env->pc,
                      tlbfill ? env->CSR_TLBRERA : env->CSR_ERA,
                      cause, tlbfill ? "(refill)" : "", env->CSR_ESTAT,
                      env->CSR_ECFG,
                      tlbfill ? env->CSR_TLBRBADV : env->CSR_BADV,
                      env->CSR_BADI, env->gpr[11], cs->cpu_index,
                      env->CSR_ASID);
    }
    cs->exception_index = -1;
}

static void loongarch_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                                vaddr addr, unsigned size,
                                                MMUAccessType access_type,
                                                int mmu_idx, MemTxAttrs attrs,
                                                MemTxResult response,
                                                uintptr_t retaddr)
{
    CPULoongArchState *env = cpu_env(cs);

    if (access_type == MMU_INST_FETCH) {
        do_raise_exception(env, EXCCODE_ADEF, retaddr);
    } else {
        do_raise_exception(env, EXCCODE_ADEM, retaddr);
    }
}

static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        CPULoongArchState *env = cpu_env(cs);

        if (cpu_loongarch_hw_interrupts_enabled(env) &&
            cpu_loongarch_hw_interrupts_pending(env)) {
            /* Raise it */
            cs->exception_index = EXCCODE_INT;
            loongarch_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
#endif

static void loongarch_cpu_synchronize_from_tb(CPUState *cs,
                                              const TranslationBlock *tb)
{
    tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));
    set_pc(cpu_env(cs), tb->pc);
}

static void loongarch_restore_state_to_opc(CPUState *cs,
                                           const TranslationBlock *tb,
                                           const uint64_t *data)
{
    set_pc(cpu_env(cs), data[0]);
}
#endif /* CONFIG_TCG */

static bool loongarch_cpu_has_work(CPUState *cs)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    bool has_work = false;

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
        cpu_loongarch_hw_interrupts_pending(cpu_env(cs))) {
        has_work = true;
    }

    return has_work;
#endif
}

static int loongarch_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    CPULoongArchState *env = cpu_env(cs);

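    /*
     * With paging enabled (CRMD.PG), the MMU index is the current privilege
     * level; otherwise the CPU is in direct-address translation mode.
     */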
    if (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG)) {
        return FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV);
    }
    return MMU_DA_IDX;
}

static void loongarch_la464_initfn(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;
    int i;

    for (i = 0; i < 21; i++) {
        env->cpucfg[i] = 0x0;
    }

    cpu->dtb_compatible = "loongarch,Loongson-3A5000";
    env->cpucfg[0] = 0x14c010; /* PRID */

    uint32_t data = 0;
    data = FIELD_DP32(data, CPUCFG1, ARCH, 2);
    data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
    data = FIELD_DP32(data, CPUCFG1, PALEN, 0x2f);
    data = FIELD_DP32(data, CPUCFG1, VALEN, 0x2f);
    data = FIELD_DP32(data, CPUCFG1, UAL, 1);
    data = FIELD_DP32(data, CPUCFG1, RI, 1);
    data = FIELD_DP32(data, CPUCFG1, EP, 1);
    data = FIELD_DP32(data, CPUCFG1, RPLV, 1);
    data = FIELD_DP32(data, CPUCFG1, HP, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1);
    env->cpucfg[1] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG2, FP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_SP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_DP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LSX, 1);
    data = FIELD_DP32(data, CPUCFG2, LASX, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LSPW, 1);
    data = FIELD_DP32(data, CPUCFG2, LAM, 1);
    env->cpucfg[2] = data;

    env->cpucfg[4] = 100 * 1000 * 1000; /* Crystal frequency */

    data = 0;
    data = FIELD_DP32(data, CPUCFG5, CC_MUL, 1);
    data = FIELD_DP32(data, CPUCFG5, CC_DIV, 1);
    env->cpucfg[5] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG16, L1_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L1_DPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRIV, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUINCL, 1);
    env->cpucfg[16] = data;

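    /*
     * Cache geometry in CPUCFG17-20 uses the CPUCFG encoding: WAYS is the
     * number of ways minus one, SETS is log2 of the number of sets and
     * SIZE is log2 of the line size in bytes, e.g. a 4-way, 256-set,
     * 64-byte-line L1 below.
     */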
    data = 0;
    data = FIELD_DP32(data, CPUCFG17, L1IU_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SIZE, 6);
    env->cpucfg[17] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG18, L1D_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG18, L1D_SETS, 8);
    data = FIELD_DP32(data, CPUCFG18, L1D_SIZE, 6);
    env->cpucfg[18] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG19, L2IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SIZE, 6);
    env->cpucfg[19] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG20, L3IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SETS, 14);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SIZE, 6);
    env->cpucfg[20] = data;

    env->CSR_ASID = FIELD_DP64(0, CSR_ASID, ASIDBITS, 0xa);

    env->CSR_PRCFG1 = FIELD_DP64(env->CSR_PRCFG1, CSR_PRCFG1, SAVE_NUM, 8);
    env->CSR_PRCFG1 = FIELD_DP64(env->CSR_PRCFG1, CSR_PRCFG1, TIMER_BITS, 0x2f);
    env->CSR_PRCFG1 = FIELD_DP64(env->CSR_PRCFG1, CSR_PRCFG1, VSMAX, 7);

    env->CSR_PRCFG2 = 0x3ffff000;

    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, TLB_TYPE, 2);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, MTLB_ENTRY, 63);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_WAYS, 7);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_SETS, 8);

    loongarch_cpu_post_init(obj);
}

static void loongarch_la132_initfn(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;

    int i;

    for (i = 0; i < 21; i++) {
        env->cpucfg[i] = 0x0;
    }

    cpu->dtb_compatible = "loongarch,Loongson-1C103";
    env->cpucfg[0] = 0x148042; /* PRID */

    uint32_t data = 0;
    data = FIELD_DP32(data, CPUCFG1, ARCH, 1); /* LA32 */
    data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
    data = FIELD_DP32(data, CPUCFG1, PALEN, 0x1f); /* 32 bits */
    data = FIELD_DP32(data, CPUCFG1, VALEN, 0x1f); /* 32 bits */
    data = FIELD_DP32(data, CPUCFG1, UAL, 1);
    data = FIELD_DP32(data, CPUCFG1, RI, 0);
    data = FIELD_DP32(data, CPUCFG1, EP, 0);
    data = FIELD_DP32(data, CPUCFG1, RPLV, 0);
    data = FIELD_DP32(data, CPUCFG1, HP, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1);
    env->cpucfg[1] = data;
}

static void loongarch_max_initfn(Object *obj)
{
    /* '-cpu max' for TCG: we use cpu la464. */
    loongarch_la464_initfn(obj);
}

static void loongarch_cpu_reset_hold(Object *obj, ResetType type)
{
    CPUState *cs = CPU(obj);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(obj);
    CPULoongArchState *env = cpu_env(cs);

    if (lacc->parent_phases.hold) {
        lacc->parent_phases.hold(obj, type);
    }

#ifdef CONFIG_TCG
    env->fcsr0_mask = FCSR0_M1 | FCSR0_M2 | FCSR0_M3;
#endif
    env->fcsr0 = 0x0;

    int n;
    /* Set CSR registers to reset values, see the manual section 6.4. */
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATF, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATM, 0);

    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, FPE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, SXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, ASXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, BTE, 0);

    env->CSR_MISC = 0;

    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, VS, 0);
    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, LIE, 0);

    env->CSR_ESTAT = env->CSR_ESTAT & (~MAKE_64BIT_MASK(0, 2));
    env->CSR_RVACFG = FIELD_DP64(env->CSR_RVACFG, CSR_RVACFG, RBITS, 0);
    env->CSR_CPUID = cs->cpu_index;
    env->CSR_TCFG = FIELD_DP64(env->CSR_TCFG, CSR_TCFG, EN, 0);
    env->CSR_LLBCTL = FIELD_DP64(env->CSR_LLBCTL, CSR_LLBCTL, KLO, 0);
    env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 0);
    env->CSR_MERRCTL = FIELD_DP64(env->CSR_MERRCTL, CSR_MERRCTL, ISMERR, 0);
    env->CSR_TID = cs->cpu_index;

    for (n = 0; n < 4; n++) {
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV0, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV1, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV2, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV3, 0);
    }

#ifndef CONFIG_USER_ONLY
    env->pc = 0x1c000000;
#ifdef CONFIG_TCG
    memset(env->tlb, 0, sizeof(env->tlb));
#endif
    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(env);
    }
#endif

#ifdef CONFIG_TCG
    restore_fp_status(env);
#endif
    cs->exception_index = -1;
}

static void loongarch_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    info->print_insn = print_insn_loongarch;
}

static void loongarch_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    loongarch_cpu_register_gdb_regs_for_features(cs);

    cpu_reset(cs);
    qemu_init_vcpu(cs);

    lacc->parent_realize(dev, errp);
}

static bool loongarch_get_lsx(Object *obj, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    bool ret;

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
        ret = true;
    } else {
        ret = false;
    }
    return ret;
}

static void loongarch_set_lsx(Object *obj, bool value, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    if (value) {
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 1);
    } else {
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 0);
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 0);
    }
}

static bool loongarch_get_lasx(Object *obj, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    bool ret;

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) {
        ret = true;
    } else {
        ret = false;
    }
    return ret;
}

static void loongarch_set_lasx(Object *obj, bool value, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

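    /*
     * LASX depends on LSX: enabling LASX turns on LSX as well, and
     * loongarch_set_lsx() above clears LASX whenever LSX is disabled.
     */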
    if (value) {
        if (!FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
            cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 1);
        }
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 1);
    } else {
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 0);
    }
}

void loongarch_cpu_post_init(Object *obj)
{
    object_property_add_bool(obj, "lsx", loongarch_get_lsx,
                             loongarch_set_lsx);
    object_property_add_bool(obj, "lasx", loongarch_get_lasx,
                             loongarch_set_lasx);
}

static void loongarch_cpu_init(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    qdev_init_gpio_in(DEVICE(cpu), loongarch_cpu_set_irq, N_IRQS);
#ifdef CONFIG_TCG
    timer_init_ns(&cpu->timer, QEMU_CLOCK_VIRTUAL,
                  &loongarch_constant_timer_cb, cpu);
#endif
#endif
}

static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;

    oc = object_class_by_name(cpu_model);
    if (!oc) {
        g_autofree char *typename
            = g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model);
        oc = object_class_by_name(typename);
    }

    return oc;
}

void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPULoongArchState *env = cpu_env(cs);
    int i;

    qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
    qemu_fprintf(f, " FCSR0 0x%08x\n", env->fcsr0);

    /* gpr */
    for (i = 0; i < 32; i++) {
        if ((i & 3) == 0) {
            qemu_fprintf(f, " GPR%02d:", i);
        }
        qemu_fprintf(f, " %s %016" PRIx64, regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }

    qemu_fprintf(f, "CRMD=%016" PRIx64 "\n", env->CSR_CRMD);
    qemu_fprintf(f, "PRMD=%016" PRIx64 "\n", env->CSR_PRMD);
    qemu_fprintf(f, "EUEN=%016" PRIx64 "\n", env->CSR_EUEN);
    qemu_fprintf(f, "ESTAT=%016" PRIx64 "\n", env->CSR_ESTAT);
    qemu_fprintf(f, "ERA=%016" PRIx64 "\n", env->CSR_ERA);
    qemu_fprintf(f, "BADV=%016" PRIx64 "\n", env->CSR_BADV);
    qemu_fprintf(f, "BADI=%016" PRIx64 "\n", env->CSR_BADI);
    qemu_fprintf(f, "EENTRY=%016" PRIx64 "\n", env->CSR_EENTRY);
    qemu_fprintf(f, "PRCFG1=%016" PRIx64 ", PRCFG2=%016" PRIx64 ","
                 " PRCFG3=%016" PRIx64 "\n",
                 env->CSR_PRCFG1, env->CSR_PRCFG2, env->CSR_PRCFG3);
    qemu_fprintf(f, "TLBRENTRY=%016" PRIx64 "\n", env->CSR_TLBRENTRY);
    qemu_fprintf(f, "TLBRBADV=%016" PRIx64 "\n", env->CSR_TLBRBADV);
    qemu_fprintf(f, "TLBRERA=%016" PRIx64 "\n", env->CSR_TLBRERA);
    qemu_fprintf(f, "TCFG=%016" PRIx64 "\n", env->CSR_TCFG);
    qemu_fprintf(f, "TVAL=%016" PRIx64 "\n", env->CSR_TVAL);

    /* fpr */
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %s %016" PRIx64, fregnames[i],
                         env->fpr[i].vreg.D(0));
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
}

#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"

static const TCGCPUOps loongarch_tcg_ops = {
    .initialize = loongarch_translate_init,
    .synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
    .restore_state_to_opc = loongarch_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = loongarch_cpu_tlb_fill,
    .cpu_exec_interrupt = loongarch_cpu_exec_interrupt,
    .cpu_exec_halt = loongarch_cpu_has_work,
    .do_interrupt = loongarch_cpu_do_interrupt,
    .do_transaction_failed = loongarch_cpu_do_transaction_failed,
#endif
};
#endif /* CONFIG_TCG */

#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps loongarch_sysemu_ops = {
    .get_phys_page_debug = loongarch_cpu_get_phys_page_debug,
};

static int64_t loongarch_cpu_get_arch_id(CPUState *cs)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);

    return cpu->phy_id;
}
#endif

static void loongarch_cpu_class_init(ObjectClass *c, void *data)
{
    LoongArchCPUClass *lacc = LOONGARCH_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, loongarch_cpu_realizefn,
                                    &lacc->parent_realize);
    resettable_class_set_parent_phases(rc, NULL, loongarch_cpu_reset_hold, NULL,
                                       &lacc->parent_phases);

    cc->class_by_name = loongarch_cpu_class_by_name;
    cc->has_work = loongarch_cpu_has_work;
    cc->mmu_index = loongarch_cpu_mmu_index;
    cc->dump_state = loongarch_cpu_dump_state;
    cc->set_pc = loongarch_cpu_set_pc;
    cc->get_pc = loongarch_cpu_get_pc;
#ifndef CONFIG_USER_ONLY
    cc->get_arch_id = loongarch_cpu_get_arch_id;
    dc->vmsd = &vmstate_loongarch_cpu;
    cc->sysemu_ops = &loongarch_sysemu_ops;
#endif
    cc->disas_set_info = loongarch_cpu_disas_set_info;
    cc->gdb_read_register = loongarch_cpu_gdb_read_register;
    cc->gdb_write_register = loongarch_cpu_gdb_write_register;
    cc->gdb_stop_before_watchpoint = true;

#ifdef CONFIG_TCG
    cc->tcg_ops = &loongarch_tcg_ops;
#endif
}

static const gchar *loongarch32_gdb_arch_name(CPUState *cs)
{
    return "loongarch32";
}

static void loongarch32_cpu_class_init(ObjectClass *c, void *data)
{
    CPUClass *cc = CPU_CLASS(c);

    cc->gdb_core_xml_file = "loongarch-base32.xml";
    cc->gdb_arch_name = loongarch32_gdb_arch_name;
}

static const gchar *loongarch64_gdb_arch_name(CPUState *cs)
{
    return "loongarch64";
}

static void loongarch64_cpu_class_init(ObjectClass *c, void *data)
{
    CPUClass *cc = CPU_CLASS(c);

    cc->gdb_core_xml_file = "loongarch-base64.xml";
    cc->gdb_arch_name = loongarch64_gdb_arch_name;
}

#define DEFINE_LOONGARCH_CPU_TYPE(size, model, initfn) \
    { \
        .parent = TYPE_LOONGARCH##size##_CPU, \
        .instance_init = initfn, \
        .name = LOONGARCH_CPU_TYPE_NAME(model), \
    }

static const TypeInfo loongarch_cpu_type_infos[] = {
    {
        .name = TYPE_LOONGARCH_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(LoongArchCPU),
        .instance_align = __alignof(LoongArchCPU),
        .instance_init = loongarch_cpu_init,

        .abstract = true,
        .class_size = sizeof(LoongArchCPUClass),
        .class_init = loongarch_cpu_class_init,
    },
    {
        .name = TYPE_LOONGARCH32_CPU,
        .parent = TYPE_LOONGARCH_CPU,

        .abstract = true,
        .class_init = loongarch32_cpu_class_init,
    },
    {
        .name = TYPE_LOONGARCH64_CPU,
        .parent = TYPE_LOONGARCH_CPU,

        .abstract = true,
        .class_init = loongarch64_cpu_class_init,
    },
    DEFINE_LOONGARCH_CPU_TYPE(64, "la464", loongarch_la464_initfn),
    DEFINE_LOONGARCH_CPU_TYPE(32, "la132", loongarch_la132_initfn),
    DEFINE_LOONGARCH_CPU_TYPE(64, "max", loongarch_max_initfn),
};

DEFINE_TYPES(loongarch_cpu_type_infos)