xref: /openbmc/qemu/target/loongarch/kvm/kvm.c (revision feb58e3b)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch KVM
 *
 * Copyright (c) 2023 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/kvm.h>

#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/log.h"
#include "hw/loader.h"
#include "sysemu/runstate.h"
#include "cpu-csr.h"
#include "kvm_loongarch.h"
#include "trace.h"

static bool cap_has_mp_state;
static unsigned int brk_insn;
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

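/*
 * Copy the general-purpose registers and PC from KVM into the QEMU-side
 * CPU state via the KVM_GET_REGS vcpu ioctl.
 */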
static int kvm_loongarch_get_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    struct kvm_regs regs;
    CPULoongArchState *env = cpu_env(cs);

    /* Get the current register set as KVM sees it */
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
    if (ret < 0) {
        trace_kvm_failed_get_regs_core(strerror(errno));
        return ret;
    }
    /* gpr[0] value is always 0 */
    env->gpr[0] = 0;
    for (i = 1; i < 32; i++) {
        env->gpr[i] = regs.gpr[i];
    }

    env->pc = regs.pc;
    return ret;
}

static int kvm_loongarch_put_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    struct kvm_regs regs;
    CPULoongArchState *env = cpu_env(cs);

    /* Set the registers based on QEMU's view of things */
    for (i = 0; i < 32; i++) {
        regs.gpr[i] = env->gpr[i];
    }

    regs.pc = env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
    if (ret < 0) {
        trace_kvm_failed_put_regs_core(strerror(errno));
    }

    return ret;
}

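/*
 * Read the guest control and status registers (CSRs) from KVM, one
 * KVM_GET_ONE_REG call per CSR; the return codes are OR-ed together so
 * any single failure shows up in the combined result.
 */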
static int kvm_loongarch_get_csr(CPUState *cs)
{
    int ret = 0;
    CPULoongArchState *env = cpu_env(cs);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CRMD),
                           &env->CSR_CRMD);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRMD),
                           &env->CSR_PRMD);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EUEN),
                           &env->CSR_EUEN);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_MISC),
                           &env->CSR_MISC);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ECFG),
                           &env->CSR_ECFG);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ESTAT),
                           &env->CSR_ESTAT);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ERA),
                           &env->CSR_ERA);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADV),
                           &env->CSR_BADV);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADI),
                           &env->CSR_BADI);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EENTRY),
                           &env->CSR_EENTRY);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBIDX),
                           &env->CSR_TLBIDX);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBEHI),
                           &env->CSR_TLBEHI);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO0),
                           &env->CSR_TLBELO0);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO1),
                           &env->CSR_TLBELO1);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ASID),
                           &env->CSR_ASID);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDL),
                           &env->CSR_PGDL);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDH),
                           &env->CSR_PGDH);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGD),
                           &env->CSR_PGD);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCL),
                           &env->CSR_PWCL);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCH),
                           &env->CSR_PWCH);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_STLBPS),
                           &env->CSR_STLBPS);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_RVACFG),
                           &env->CSR_RVACFG);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CPUID),
                           &env->CSR_CPUID);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG1),
                           &env->CSR_PRCFG1);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG2),
                           &env->CSR_PRCFG2);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG3),
                           &env->CSR_PRCFG3);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(0)),
                           &env->CSR_SAVE[0]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(1)),
                           &env->CSR_SAVE[1]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(2)),
                           &env->CSR_SAVE[2]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(3)),
                           &env->CSR_SAVE[3]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(4)),
                           &env->CSR_SAVE[4]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(5)),
                           &env->CSR_SAVE[5]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(6)),
                           &env->CSR_SAVE[6]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(7)),
                           &env->CSR_SAVE[7]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TID),
                           &env->CSR_TID);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CNTC),
                           &env->CSR_CNTC);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TICLR),
                           &env->CSR_TICLR);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_LLBCTL),
                           &env->CSR_LLBCTL);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL1),
                           &env->CSR_IMPCTL1);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL2),
                           &env->CSR_IMPCTL2);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRENTRY),
                           &env->CSR_TLBRENTRY);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRBADV),
                           &env->CSR_TLBRBADV);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRERA),
                           &env->CSR_TLBRERA);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRSAVE),
                           &env->CSR_TLBRSAVE);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO0),
                           &env->CSR_TLBRELO0);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO1),
                           &env->CSR_TLBRELO1);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBREHI),
                           &env->CSR_TLBREHI);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRPRMD),
                           &env->CSR_TLBRPRMD);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(0)),
                           &env->CSR_DMW[0]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(1)),
                           &env->CSR_DMW[1]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(2)),
                           &env->CSR_DMW[2]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(3)),
                           &env->CSR_DMW[3]);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TVAL),
                           &env->CSR_TVAL);

    ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TCFG),
                           &env->CSR_TCFG);

    return ret;
}

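/*
 * Write the QEMU-side CSR values back to KVM.  @level follows the usual
 * kvm_arch_put_registers() convention: registers that are constant after
 * power-on (CPUID) are only written for KVM_PUT_FULL_STATE.
 */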
static int kvm_loongarch_put_csr(CPUState *cs, int level)
{
    int ret = 0;
    CPULoongArchState *env = cpu_env(cs);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CRMD),
                           &env->CSR_CRMD);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRMD),
                           &env->CSR_PRMD);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EUEN),
                           &env->CSR_EUEN);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_MISC),
                           &env->CSR_MISC);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ECFG),
                           &env->CSR_ECFG);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ESTAT),
                           &env->CSR_ESTAT);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ERA),
                           &env->CSR_ERA);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADV),
                           &env->CSR_BADV);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADI),
                           &env->CSR_BADI);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EENTRY),
                           &env->CSR_EENTRY);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBIDX),
                           &env->CSR_TLBIDX);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBEHI),
                           &env->CSR_TLBEHI);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO0),
                           &env->CSR_TLBELO0);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO1),
                           &env->CSR_TLBELO1);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ASID),
                           &env->CSR_ASID);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDL),
                           &env->CSR_PGDL);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDH),
                           &env->CSR_PGDH);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGD),
                           &env->CSR_PGD);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCL),
                           &env->CSR_PWCL);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCH),
                           &env->CSR_PWCH);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_STLBPS),
                           &env->CSR_STLBPS);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_RVACFG),
                           &env->CSR_RVACFG);

    /* CPUID is constant after power-on; it should be set only once */
    if (level >= KVM_PUT_FULL_STATE) {
        ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CPUID),
                               &env->CSR_CPUID);
    }

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG1),
                           &env->CSR_PRCFG1);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG2),
                           &env->CSR_PRCFG2);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG3),
                           &env->CSR_PRCFG3);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(0)),
                           &env->CSR_SAVE[0]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(1)),
                           &env->CSR_SAVE[1]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(2)),
                           &env->CSR_SAVE[2]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(3)),
                           &env->CSR_SAVE[3]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(4)),
                           &env->CSR_SAVE[4]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(5)),
                           &env->CSR_SAVE[5]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(6)),
                           &env->CSR_SAVE[6]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(7)),
                           &env->CSR_SAVE[7]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TID),
                           &env->CSR_TID);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CNTC),
                           &env->CSR_CNTC);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TICLR),
                           &env->CSR_TICLR);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_LLBCTL),
                           &env->CSR_LLBCTL);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL1),
                           &env->CSR_IMPCTL1);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL2),
                           &env->CSR_IMPCTL2);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRENTRY),
                           &env->CSR_TLBRENTRY);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRBADV),
                           &env->CSR_TLBRBADV);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRERA),
                           &env->CSR_TLBRERA);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRSAVE),
                           &env->CSR_TLBRSAVE);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO0),
                           &env->CSR_TLBRELO0);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO1),
                           &env->CSR_TLBRELO1);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBREHI),
                           &env->CSR_TLBREHI);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRPRMD),
                           &env->CSR_TLBRPRMD);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(0)),
                           &env->CSR_DMW[0]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(1)),
                           &env->CSR_DMW[1]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(2)),
                           &env->CSR_DMW[2]);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(3)),
                           &env->CSR_DMW[3]);
    /*
     * The timer configuration must be written last, since it is used to
     * enable the guest timer.
     */
    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TVAL),
                           &env->CSR_TVAL);

    ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TCFG),
                           &env->CSR_TCFG);
    return ret;
}

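/*
 * Fetch the FPU/vector state: fcsr, the 256-bit register file and the
 * eight condition flags, which KVM packs one per byte into fpu.fcc.
 */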
static int kvm_loongarch_get_regs_fp(CPUState *cs)
{
    int ret, i;
    struct kvm_fpu fpu;
    CPULoongArchState *env = cpu_env(cs);

    ret = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
    if (ret < 0) {
        trace_kvm_failed_get_fpu(strerror(errno));
        return ret;
    }

    env->fcsr0 = fpu.fcsr;
    for (i = 0; i < 32; i++) {
        env->fpr[i].vreg.UD[0] = fpu.fpr[i].val64[0];
        env->fpr[i].vreg.UD[1] = fpu.fpr[i].val64[1];
        env->fpr[i].vreg.UD[2] = fpu.fpr[i].val64[2];
        env->fpr[i].vreg.UD[3] = fpu.fpr[i].val64[3];
    }
    for (i = 0; i < 8; i++) {
        env->cf[i] = fpu.fcc & 0xFF;
        fpu.fcc = fpu.fcc >> 8;
    }

    return ret;
}

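/*
 * Mirror of kvm_loongarch_get_regs_fp(): push fcsr, the register file and
 * the condition flags (re-packed into fpu.fcc, one byte per flag) to KVM.
 */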
static int kvm_loongarch_put_regs_fp(CPUState *cs)
{
    int ret, i;
    struct kvm_fpu fpu;
    CPULoongArchState *env = cpu_env(cs);

    fpu.fcsr = env->fcsr0;
    fpu.fcc = 0;
    for (i = 0; i < 32; i++) {
        fpu.fpr[i].val64[0] = env->fpr[i].vreg.UD[0];
        fpu.fpr[i].val64[1] = env->fpr[i].vreg.UD[1];
        fpu.fpr[i].val64[2] = env->fpr[i].vreg.UD[2];
        fpu.fpr[i].val64[3] = env->fpr[i].vreg.UD[3];
    }

    for (i = 0; i < 8; i++) {
        fpu.fcc |= env->cf[i] << (8 * i);
    }

    ret = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
    if (ret < 0) {
        trace_kvm_failed_put_fpu(strerror(errno));
    }

    return ret;
}

void kvm_arch_reset_vcpu(CPUState *cs)
{
    CPULoongArchState *env = cpu_env(cs);

    env->mp_state = KVM_MP_STATE_RUNNABLE;
    kvm_set_one_reg(cs, KVM_REG_LOONGARCH_VCPU_RESET, 0);
}

static int kvm_loongarch_get_mpstate(CPUState *cs)
{
    int ret = 0;
    struct kvm_mp_state mp_state;
    CPULoongArchState *env = cpu_env(cs);

    if (cap_has_mp_state) {
        ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
        if (ret) {
            trace_kvm_failed_get_mpstate(strerror(errno));
            return ret;
        }
        env->mp_state = mp_state.mp_state;
    }

    return ret;
}

static int kvm_loongarch_put_mpstate(CPUState *cs)
{
    int ret = 0;
    struct kvm_mp_state mp_state = {
        .mp_state = cpu_env(cs)->mp_state
    };

    if (cap_has_mp_state) {
        ret = kvm_vcpu_ioctl(cs, KVM_SET_MP_STATE, &mp_state);
        if (ret) {
            trace_kvm_failed_put_mpstate(strerror(errno));
        }
    }

    return ret;
}

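/*
 * Read the 21 CPUCFG words exposed through KVM_GET_ONE_REG; only the low
 * 32 bits of each returned value are kept.
 */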
static int kvm_loongarch_get_cpucfg(CPUState *cs)
{
    int i, ret = 0;
    uint64_t val;
    CPULoongArchState *env = cpu_env(cs);

    for (i = 0; i < 21; i++) {
        ret = kvm_get_one_reg(cs, KVM_IOC_CPUCFG(i), &val);
        if (ret < 0) {
            trace_kvm_failed_get_cpucfg(strerror(errno));
        }
        env->cpucfg[i] = (uint32_t)val;
    }
    return ret;
}

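/*
 * Mask the QEMU view of CPUCFG2 against what KVM reports via the
 * KVM_LOONGARCH_VCPU_CPUCFG device attribute, and force the FP/LLFTP
 * version fields to at least 1 when the corresponding feature is present.
 */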
static int kvm_check_cpucfg2(CPUState *cs)
{
    int ret;
    uint64_t val;
    struct kvm_device_attr attr = {
        .group = KVM_LOONGARCH_VCPU_CPUCFG,
        .attr = 2,
        .addr = (uint64_t)&val,
    };
    CPULoongArchState *env = cpu_env(cs);

    ret = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, &attr);

    if (!ret) {
        kvm_vcpu_ioctl(cs, KVM_GET_DEVICE_ATTR, &attr);
        env->cpucfg[2] &= val;

        if (FIELD_EX32(env->cpucfg[2], CPUCFG2, FP)) {
            /* The minimum FP version is 1. */
            env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, FP_VER, 1);
        }

        if (FIELD_EX32(env->cpucfg[2], CPUCFG2, LLFTP)) {
            /* The minimum LLFTP version is 1. */
            env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LLFTP_VER, 1);
        }
    }

    return ret;
}

static int kvm_loongarch_put_cpucfg(CPUState *cs)
{
    int i, ret = 0;
    CPULoongArchState *env = cpu_env(cs);
    uint64_t val;

    for (i = 0; i < 21; i++) {
        if (i == 2) {
            ret = kvm_check_cpucfg2(cs);
            if (ret) {
                return ret;
            }
        }
        val = env->cpucfg[i];
        ret = kvm_set_one_reg(cs, KVM_IOC_CPUCFG(i), &val);
        if (ret < 0) {
            trace_kvm_failed_put_cpucfg(strerror(errno));
        }
    }
    return ret;
}

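/* Top-level hooks: sync the complete vCPU state between KVM and QEMU. */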
int kvm_arch_get_registers(CPUState *cs, Error **errp)
{
    int ret;

    ret = kvm_loongarch_get_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_get_cpucfg(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_get_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_get_regs_fp(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_get_mpstate(cs);
    return ret;
}

int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
{
    int ret;

    ret = kvm_loongarch_put_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_put_cpucfg(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_put_csr(cs, level);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_put_regs_fp(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_loongarch_put_mpstate(cs);
    return ret;
}

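/*
 * VM state change handler: save the guest's stable counter when the VM
 * stops and restore it when the VM resumes, so the counter value is
 * preserved across the stop/resume cycle.
 */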
static void kvm_loongarch_vm_stage_change(void *opaque, bool running,
                                          RunState state)
{
    int ret;
    CPUState *cs = opaque;
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);

    if (running) {
        ret = kvm_set_one_reg(cs, KVM_REG_LOONGARCH_COUNTER,
                              &cpu->kvm_state_counter);
        if (ret < 0) {
            trace_kvm_failed_put_counter(strerror(errno));
        }
    } else {
        ret = kvm_get_one_reg(cs, KVM_REG_LOONGARCH_COUNTER,
                              &cpu->kvm_state_counter);
        if (ret < 0) {
            trace_kvm_failed_get_counter(strerror(errno));
        }
    }
}

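/*
 * Per-vCPU init: register the counter save/restore handler and cache the
 * break instruction KVM uses for software breakpoints, if the kernel
 * exposes KVM_REG_LOONGARCH_DEBUG_INST.
 */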
int kvm_arch_init_vcpu(CPUState *cs)
{
    uint64_t val;

    qemu_add_vm_change_state_handler(kvm_loongarch_vm_stage_change, cs);

    if (!kvm_get_one_reg(cs, KVM_REG_LOONGARCH_DEBUG_INST, &val)) {
        brk_insn = val;
    }

    return 0;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    return cs->cpu_index;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

int kvm_arch_get_default_type(MachineState *ms)
{
    return 0;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);
    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    if (kvm_sw_breakpoints_active(cpu)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
}

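/*
 * Software breakpoints: insertion saves the original instruction word and
 * patches in the break instruction obtained from KVM; removal verifies the
 * break instruction is still in place before restoring the original word.
 */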
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
        error_report("%s failed", __func__);
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
        brk != brk_insn ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
        error_report("%s failed", __func__);
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    return -ENOSYS;
}

int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    return -ENOSYS;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
}

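/*
 * A KVM_EXIT_DEBUG belongs to QEMU if single-stepping is enabled or the PC
 * matches a software breakpoint QEMU inserted; otherwise it is not handled
 * here.
 */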
static bool kvm_loongarch_handle_debug(CPUState *cs, struct kvm_run *run)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
    CPULoongArchState *env = &cpu->env;

    kvm_cpu_synchronize_state(cs);
    if (cs->singlestep_enabled) {
        return true;
    }

    if (kvm_find_sw_breakpoint(cs, env->pc)) {
        return true;
    }

    return false;
}

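/*
 * KVM exit handler: IOCSR accesses are forwarded to the per-CPU IOCSR
 * address space, debug exits that QEMU owns return EXCP_DEBUG, and any
 * other exit reason is reported and treated as an error.
 */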
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;
    CPULoongArchState *env = cpu_env(cs);
    MemTxAttrs attrs = {};

    attrs.requester_id = env_cpu(env)->cpu_index;

    trace_kvm_arch_handle_exit(run->exit_reason);
    switch (run->exit_reason) {
    case KVM_EXIT_LOONGARCH_IOCSR:
        address_space_rw(env->address_space_iocsr,
                         run->iocsr_io.phys_addr,
                         attrs,
                         run->iocsr_io.data,
                         run->iocsr_io.len,
                         run->iocsr_io.is_write);
        break;

    case KVM_EXIT_DEBUG:
        if (kvm_loongarch_handle_debug(cs, run)) {
            ret = EXCP_DEBUG;
        }
        break;

    default:
        ret = -1;
        warn_report("KVM: unknown exit reason %d", run->exit_reason);
        break;
    }
    return ret;
}

848 {
849     struct kvm_interrupt intr;
850     CPUState *cs = CPU(cpu);
851 
852     if (level) {
853         intr.irq = irq;
854     } else {
855         intr.irq = -irq;
856     }
857 
858     trace_kvm_set_intr(irq, level);
859     return kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
860 }
861 
862 void kvm_arch_accel_class_init(ObjectClass *oc)
863 {
864 }
865