xref: /openbmc/qemu/target/riscv/cpu_helper.c (revision 2df1eb27)
1 /*
2  * RISC-V CPU helpers for qemu.
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/main-loop.h"
23 #include "cpu.h"
24 #include "internals.h"
25 #include "pmu.h"
26 #include "exec/exec-all.h"
27 #include "instmap.h"
28 #include "tcg/tcg-op.h"
29 #include "trace.h"
30 #include "semihosting/common-semi.h"
31 #include "sysemu/cpu-timers.h"
32 #include "cpu_bits.h"
33 #include "debug.h"
34 #include "tcg/oversized-guest.h"
35 
36 int riscv_env_mmu_index(CPURISCVState *env, bool ifetch)
37 {
38 #ifdef CONFIG_USER_ONLY
39     return 0;
40 #else
41     bool virt = env->virt_enabled;
42     int mode = env->priv;
43 
44     /* All priv -> mmu_idx mappings are here */
45     if (!ifetch) {
46         uint64_t status = env->mstatus;
47 
48         if (mode == PRV_M && get_field(status, MSTATUS_MPRV)) {
49             mode = get_field(env->mstatus, MSTATUS_MPP);
50             virt = get_field(env->mstatus, MSTATUS_MPV) &&
51                    (mode != PRV_M);
52             if (virt) {
53                 status = env->vsstatus;
54             }
55         }
56         if (mode == PRV_S && get_field(status, MSTATUS_SUM)) {
57             mode = MMUIdx_S_SUM;
58         }
59     }
60 
61     return mode | (virt ? MMU_2STAGE_BIT : 0);
62 #endif
63 }
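/*
 * Illustrative mappings (a sketch; the exact MMUIdx_* / MMU_2STAGE_BIT
 * encoding is assumed to live in internals.h, not in this file): an
 * instruction fetch in S-mode returns PRV_S with no SUM adjustment; a
 * data access from S-mode with MSTATUS.SUM set returns MMUIdx_S_SUM; and
 * any access made while env->virt_enabled (VS/VU-mode) additionally ORs
 * in MMU_2STAGE_BIT so that two-stage translation is selected.
 */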
64 
65 void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
66                           uint64_t *cs_base, uint32_t *pflags)
67 {
68     RISCVCPU *cpu = env_archcpu(env);
69     RISCVExtStatus fs, vs;
70     uint32_t flags = 0;
71 
72     *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
73     *cs_base = 0;
74 
75     if (cpu->cfg.ext_zve32f) {
76         /*
77          * If env->vl equals VLMAX, we can use the generic vector operation
78          * expanders (GVEC) to accelerate the vector operations.
79          * However, as LMUL can be a fractional number, the maximum vector
80          * size that can be operated on might be less than 8 bytes, which
81          * is not supported by GVEC. So we set the vl_eq_vlmax flag to true
82          * only when maxsz >= 8 bytes.
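         *
         * Illustrative numbers (assumed for this sketch, not taken from this
         * file): with VLEN = 128 and SEW = 8 (vsew = 0), LMUL = 1 gives
         * vlmax = 16 and maxsz = 16, so the GVEC path is usable once
         * vl == vlmax and vstart == 0, while a fractional LMUL = 1/8 gives
         * vlmax = 2 and maxsz = 2 < 8, leaving vl_eq_vlmax false.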
83          */
84         uint32_t vlmax = vext_get_vlmax(cpu, env->vtype);
85         uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
86         uint32_t maxsz = vlmax << sew;
87         bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
88                            (maxsz >= 8);
89         flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
90         flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
91         flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
92                            FIELD_EX64(env->vtype, VTYPE, VLMUL));
93         flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
94         flags = FIELD_DP32(flags, TB_FLAGS, VTA,
95                            FIELD_EX64(env->vtype, VTYPE, VTA));
96         flags = FIELD_DP32(flags, TB_FLAGS, VMA,
97                            FIELD_EX64(env->vtype, VTYPE, VMA));
98         flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
99     } else {
100         flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
101     }
102 
103 #ifdef CONFIG_USER_ONLY
104     fs = EXT_STATUS_DIRTY;
105     vs = EXT_STATUS_DIRTY;
106 #else
107     flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);
108 
109     flags |= riscv_env_mmu_index(env, 0);
110     fs = get_field(env->mstatus, MSTATUS_FS);
111     vs = get_field(env->mstatus, MSTATUS_VS);
112 
113     if (env->virt_enabled) {
114         flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
115         /*
116          * Merge DISABLED and !DIRTY states using MIN.
117          * We will set both fields when dirtying.
118          */
119         fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
120         vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
121     }
122 
123     /* With Zfinx, floating point is enabled/disabled by Smstateen. */
124     if (!riscv_has_ext(env, RVF)) {
125         fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
126              ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
127     }
128 
129     if (cpu->cfg.debug && !icount_enabled()) {
130         flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
131     }
132 #endif
133 
134     flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
135     flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
136     flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
137     flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
138     if (env->cur_pmmask != 0) {
139         flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
140     }
141     if (env->cur_pmbase != 0) {
142         flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
143     }
144 
145     *pflags = flags;
146 }
147 
148 void riscv_cpu_update_mask(CPURISCVState *env)
149 {
150     target_ulong mask = 0, base = 0;
151     RISCVMXL xl = env->xl;
152     /*
153      * TODO: Current RVJ spec does not specify
154      * how the extension interacts with XLEN.
155      */
156 #ifndef CONFIG_USER_ONLY
157     int mode = cpu_address_mode(env);
158     xl = cpu_get_xl(env, mode);
159     if (riscv_has_ext(env, RVJ)) {
160         switch (mode) {
161         case PRV_M:
162             if (env->mmte & M_PM_ENABLE) {
163                 mask = env->mpmmask;
164                 base = env->mpmbase;
165             }
166             break;
167         case PRV_S:
168             if (env->mmte & S_PM_ENABLE) {
169                 mask = env->spmmask;
170                 base = env->spmbase;
171             }
172             break;
173         case PRV_U:
174             if (env->mmte & U_PM_ENABLE) {
175                 mask = env->upmmask;
176                 base = env->upmbase;
177             }
178             break;
179         default:
180             g_assert_not_reached();
181         }
182     }
183 #endif
184     if (xl == MXL_RV32) {
185         env->cur_pmmask = mask & UINT32_MAX;
186         env->cur_pmbase = base & UINT32_MAX;
187     } else {
188         env->cur_pmmask = mask;
189         env->cur_pmbase = base;
190     }
191 }
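/*
 * Illustrative effect of the truncation above: on a hart whose effective
 * XLEN is currently 32 (xl == MXL_RV32), only the low 32 bits of the
 * selected J-extension mask/base registers end up in cur_pmmask and
 * cur_pmbase; without RVJ, or with the relevant *_PM_ENABLE bit clear in
 * mmte, both remain 0 and pointer masking is effectively off.
 */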
192 
193 #ifndef CONFIG_USER_ONLY
194 
195 /*
196  * The HS-mode is allowed to configure priority only for the
197  * following VS-mode local interrupts:
198  *
199  * 0  (Reserved interrupt, reads as zero)
200  * 1  Supervisor software interrupt
201  * 4  (Reserved interrupt, reads as zero)
202  * 5  Supervisor timer interrupt
203  * 8  (Reserved interrupt, reads as zero)
204  * 13 (Reserved interrupt)
205  * 14 "
206  * 15 "
207  * 16 "
208  * 17 "
209  * 18 "
210  * 19 "
211  * 20 "
212  * 21 "
213  * 22 "
214  * 23 "
215  */
216 
217 static const int hviprio_index2irq[] = {
218     0, 1, 4, 5, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 };
219 static const int hviprio_index2rdzero[] = {
220     1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
221 
222 int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero)
223 {
224     if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) {
225         return -EINVAL;
226     }
227 
228     if (out_irq) {
229         *out_irq = hviprio_index2irq[index];
230     }
231 
232     if (out_rdzero) {
233         *out_rdzero = hviprio_index2rdzero[index];
234     }
235 
236     return 0;
237 }
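/*
 * Usage sketch, derived from the tables above: index 1 maps to IRQ 1
 * (supervisor software interrupt) with rdzero == 0, whereas index 0 maps
 * to IRQ 0 with rdzero == 1, meaning the corresponding hviprio field
 * reads as zero.
 */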
238 
239 /*
240  * Default priorities of local interrupts are defined in the
241  * RISC-V Advanced Interrupt Architecture specification.
242  *
243  * ----------------------------------------------------------------
244  *  Default  |
245  *  Priority | Major Interrupt Numbers
246  * ----------------------------------------------------------------
247  *  Highest  | 47, 23, 46, 45, 22, 44,
248  *           | 43, 21, 42, 41, 20, 40
249  *           |
250  *           | 11 (0b),  3 (03),  7 (07)
251  *           |  9 (09),  1 (01),  5 (05)
252  *           | 12 (0c)
253  *           | 10 (0a),  2 (02),  6 (06)
254  *           |
255  *           | 39, 19, 38, 37, 18, 36,
256  *  Lowest   | 35, 17, 34, 33, 16, 32
257  * ----------------------------------------------------------------
258  */
259 static const uint8_t default_iprio[64] = {
260     /* Custom interrupts 48 to 63 */
261     [63] = IPRIO_MMAXIPRIO,
262     [62] = IPRIO_MMAXIPRIO,
263     [61] = IPRIO_MMAXIPRIO,
264     [60] = IPRIO_MMAXIPRIO,
265     [59] = IPRIO_MMAXIPRIO,
266     [58] = IPRIO_MMAXIPRIO,
267     [57] = IPRIO_MMAXIPRIO,
268     [56] = IPRIO_MMAXIPRIO,
269     [55] = IPRIO_MMAXIPRIO,
270     [54] = IPRIO_MMAXIPRIO,
271     [53] = IPRIO_MMAXIPRIO,
272     [52] = IPRIO_MMAXIPRIO,
273     [51] = IPRIO_MMAXIPRIO,
274     [50] = IPRIO_MMAXIPRIO,
275     [49] = IPRIO_MMAXIPRIO,
276     [48] = IPRIO_MMAXIPRIO,
277 
278     /* Custom interrupts 24 to 31 */
279     [31] = IPRIO_MMAXIPRIO,
280     [30] = IPRIO_MMAXIPRIO,
281     [29] = IPRIO_MMAXIPRIO,
282     [28] = IPRIO_MMAXIPRIO,
283     [27] = IPRIO_MMAXIPRIO,
284     [26] = IPRIO_MMAXIPRIO,
285     [25] = IPRIO_MMAXIPRIO,
286     [24] = IPRIO_MMAXIPRIO,
287 
288     [47] = IPRIO_DEFAULT_UPPER,
289     [23] = IPRIO_DEFAULT_UPPER + 1,
290     [46] = IPRIO_DEFAULT_UPPER + 2,
291     [45] = IPRIO_DEFAULT_UPPER + 3,
292     [22] = IPRIO_DEFAULT_UPPER + 4,
293     [44] = IPRIO_DEFAULT_UPPER + 5,
294 
295     [43] = IPRIO_DEFAULT_UPPER + 6,
296     [21] = IPRIO_DEFAULT_UPPER + 7,
297     [42] = IPRIO_DEFAULT_UPPER + 8,
298     [41] = IPRIO_DEFAULT_UPPER + 9,
299     [20] = IPRIO_DEFAULT_UPPER + 10,
300     [40] = IPRIO_DEFAULT_UPPER + 11,
301 
302     [11] = IPRIO_DEFAULT_M,
303     [3]  = IPRIO_DEFAULT_M + 1,
304     [7]  = IPRIO_DEFAULT_M + 2,
305 
306     [9]  = IPRIO_DEFAULT_S,
307     [1]  = IPRIO_DEFAULT_S + 1,
308     [5]  = IPRIO_DEFAULT_S + 2,
309 
310     [12] = IPRIO_DEFAULT_SGEXT,
311 
312     [10] = IPRIO_DEFAULT_VS,
313     [2]  = IPRIO_DEFAULT_VS + 1,
314     [6]  = IPRIO_DEFAULT_VS + 2,
315 
316     [39] = IPRIO_DEFAULT_LOWER,
317     [19] = IPRIO_DEFAULT_LOWER + 1,
318     [38] = IPRIO_DEFAULT_LOWER + 2,
319     [37] = IPRIO_DEFAULT_LOWER + 3,
320     [18] = IPRIO_DEFAULT_LOWER + 4,
321     [36] = IPRIO_DEFAULT_LOWER + 5,
322 
323     [35] = IPRIO_DEFAULT_LOWER + 6,
324     [17] = IPRIO_DEFAULT_LOWER + 7,
325     [34] = IPRIO_DEFAULT_LOWER + 8,
326     [33] = IPRIO_DEFAULT_LOWER + 9,
327     [16] = IPRIO_DEFAULT_LOWER + 10,
328     [32] = IPRIO_DEFAULT_LOWER + 11,
329 };
330 
331 uint8_t riscv_cpu_default_priority(int irq)
332 {
333     if (irq < 0 || irq > 63) {
334         return IPRIO_MMAXIPRIO;
335     }
336 
337     return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO;
338 }
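/*
 * For example, riscv_cpu_default_priority(9) (supervisor external
 * interrupt) returns IPRIO_DEFAULT_S, while an out-of-range IRQ or a
 * custom IRQ without a table entry falls back to IPRIO_MMAXIPRIO, the
 * numerically largest and therefore lowest priority.
 */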
339 
340 static int riscv_cpu_pending_to_irq(CPURISCVState *env,
341                                     int extirq, unsigned int extirq_def_prio,
342                                     uint64_t pending, uint8_t *iprio)
343 {
344     int irq, best_irq = RISCV_EXCP_NONE;
345     unsigned int prio, best_prio = UINT_MAX;
346 
347     if (!pending) {
348         return RISCV_EXCP_NONE;
349     }
350 
351     irq = ctz64(pending);
352     if (!((extirq == IRQ_M_EXT) ? riscv_cpu_cfg(env)->ext_smaia :
353                                   riscv_cpu_cfg(env)->ext_ssaia)) {
354         return irq;
355     }
356 
357     pending = pending >> irq;
358     while (pending) {
359         prio = iprio[irq];
360         if (!prio) {
361             if (irq == extirq) {
362                 prio = extirq_def_prio;
363             } else {
364                 prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ?
365                        1 : IPRIO_MMAXIPRIO;
366             }
367         }
368         if ((pending & 0x1) && (prio <= best_prio)) {
369             best_irq = irq;
370             best_prio = prio;
371         }
372         irq++;
373         pending = pending >> 1;
374     }
375 
376     return best_irq;
377 }
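/*
 * Selection sketch: without Smaia/Ssaia the lowest-numbered pending bit
 * simply wins (ctz64). With AIA, a non-zero iprio[] entry is used as the
 * priority directly; a zero entry falls back to the external interrupt's
 * default priority, or to a value derived from the default priority table
 * above, and the pending IRQ with the numerically smallest priority is
 * returned.
 */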
378 
379 /*
380  * Doesn't report interrupts inserted using mvip from M-mode firmware or
381  * using hvip bits 13:63 from HS-mode. Those are returned in
382  * riscv_cpu_sirq_pending() and riscv_cpu_vsirq_pending().
383  */
384 uint64_t riscv_cpu_all_pending(CPURISCVState *env)
385 {
386     uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN);
387     uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
388     uint64_t vstip = (env->vstime_irq) ? MIP_VSTIP : 0;
389 
390     return (env->mip | vsgein | vstip) & env->mie;
391 }
392 
393 int riscv_cpu_mirq_pending(CPURISCVState *env)
394 {
395     uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg &
396                     ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
397 
398     return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
399                                     irqs, env->miprio);
400 }
401 
402 int riscv_cpu_sirq_pending(CPURISCVState *env)
403 {
404     uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
405                     ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
406     uint64_t irqs_f = env->mvip & env->mvien & ~env->mideleg & env->sie;
407 
408     return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
409                                     irqs | irqs_f, env->siprio);
410 }
411 
412 int riscv_cpu_vsirq_pending(CPURISCVState *env)
413 {
414     uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg & env->hideleg;
415     uint64_t irqs_f_vs = env->hvip & env->hvien & ~env->hideleg & env->vsie;
416     uint64_t vsbits;
417 
418     /* Bring VS-level bits to correct position */
419     vsbits = irqs & VS_MODE_INTERRUPTS;
420     irqs &= ~VS_MODE_INTERRUPTS;
421     irqs |= vsbits >> 1;
422 
423     return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
424                                     (irqs | irqs_f_vs), env->hviprio);
425 }
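/*
 * Note on the "vsbits >> 1" adjustment used here and below: VS-level bits
 * sit one position above their S-level counterparts in mip/mie (e.g.
 * MIP_VSSIP is bit 2 while MIP_SSIP is bit 1), so shifting right by one
 * presents delegated VS interrupts at the S-level positions the guest
 * expects.
 */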
426 
427 static int riscv_cpu_local_irq_pending(CPURISCVState *env)
428 {
429     uint64_t irqs, pending, mie, hsie, vsie, irqs_f, irqs_f_vs;
430     uint64_t vsbits, irq_delegated;
431     int virq;
432 
433     /* Determine interrupt enable state of all privilege modes */
434     if (env->virt_enabled) {
435         mie = 1;
436         hsie = 1;
437         vsie = (env->priv < PRV_S) ||
438                (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
439     } else {
440         mie = (env->priv < PRV_M) ||
441               (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE));
442         hsie = (env->priv < PRV_S) ||
443                (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
444         vsie = 0;
445     }
446 
447     /* Determine all pending interrupts */
448     pending = riscv_cpu_all_pending(env);
449 
450     /* Check M-mode interrupts */
451     irqs = pending & ~env->mideleg & -mie;
452     if (irqs) {
453         return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
454                                         irqs, env->miprio);
455     }
456 
457     /* Check for virtual S-mode interrupts. */
458     irqs_f = env->mvip & (env->mvien & ~env->mideleg) & env->sie;
459 
460     /* Check HS-mode interrupts */
461     irqs =  ((pending & env->mideleg & ~env->hideleg) | irqs_f) & -hsie;
462     if (irqs) {
463         return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
464                                         irqs, env->siprio);
465     }
466 
467     /* Check for virtual VS-mode interrupts. */
468     irqs_f_vs = env->hvip & env->hvien & ~env->hideleg & env->vsie;
469 
470     /* Check VS-mode interrupts */
471     irq_delegated = pending & env->mideleg & env->hideleg;
472 
473     /* Bring VS-level bits to correct position */
474     vsbits = irq_delegated & VS_MODE_INTERRUPTS;
475     irq_delegated &= ~VS_MODE_INTERRUPTS;
476     irq_delegated |= vsbits >> 1;
477 
478     irqs = (irq_delegated | irqs_f_vs) & -vsie;
479     if (irqs) {
480         virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
481                                         irqs, env->hviprio);
482         if (virq <= 0 || (virq > 12 && virq <= 63)) {
483             return virq;
484         } else {
485             return virq + 1;
486         }
487     }
488 
489     /* Indicate no pending interrupt */
490     return RISCV_EXCP_NONE;
491 }
492 
493 bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
494 {
495     if (interrupt_request & CPU_INTERRUPT_HARD) {
496         RISCVCPU *cpu = RISCV_CPU(cs);
497         CPURISCVState *env = &cpu->env;
498         int interruptno = riscv_cpu_local_irq_pending(env);
499         if (interruptno >= 0) {
500             cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
501             riscv_cpu_do_interrupt(cs);
502             return true;
503         }
504     }
505     return false;
506 }
507 
508 /* Return true if floating point support is currently enabled */
509 bool riscv_cpu_fp_enabled(CPURISCVState *env)
510 {
511     if (env->mstatus & MSTATUS_FS) {
512         if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_FS)) {
513             return false;
514         }
515         return true;
516     }
517 
518     return false;
519 }
520 
521 /* Return true if vector support is currently enabled */
522 bool riscv_cpu_vector_enabled(CPURISCVState *env)
523 {
524     if (env->mstatus & MSTATUS_VS) {
525         if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_VS)) {
526             return false;
527         }
528         return true;
529     }
530 
531     return false;
532 }
533 
534 void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
535 {
536     uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
537                             MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
538                             MSTATUS64_UXL | MSTATUS_VS;
539 
540     if (riscv_has_ext(env, RVF)) {
541         mstatus_mask |= MSTATUS_FS;
542     }
543     bool current_virt = env->virt_enabled;
544 
545     g_assert(riscv_has_ext(env, RVH));
546 
547     if (current_virt) {
548         /* Current V=1 and we are about to change to V=0 */
549         env->vsstatus = env->mstatus & mstatus_mask;
550         env->mstatus &= ~mstatus_mask;
551         env->mstatus |= env->mstatus_hs;
552 
553         env->vstvec = env->stvec;
554         env->stvec = env->stvec_hs;
555 
556         env->vsscratch = env->sscratch;
557         env->sscratch = env->sscratch_hs;
558 
559         env->vsepc = env->sepc;
560         env->sepc = env->sepc_hs;
561 
562         env->vscause = env->scause;
563         env->scause = env->scause_hs;
564 
565         env->vstval = env->stval;
566         env->stval = env->stval_hs;
567 
568         env->vsatp = env->satp;
569         env->satp = env->satp_hs;
570     } else {
571         /* Current V=0 and we are about to change to V=1 */
572         env->mstatus_hs = env->mstatus & mstatus_mask;
573         env->mstatus &= ~mstatus_mask;
574         env->mstatus |= env->vsstatus;
575 
576         env->stvec_hs = env->stvec;
577         env->stvec = env->vstvec;
578 
579         env->sscratch_hs = env->sscratch;
580         env->sscratch = env->vsscratch;
581 
582         env->sepc_hs = env->sepc;
583         env->sepc = env->vsepc;
584 
585         env->scause_hs = env->scause;
586         env->scause = env->vscause;
587 
588         env->stval_hs = env->stval;
589         env->stval = env->vstval;
590 
591         env->satp_hs = env->satp;
592         env->satp = env->vsatp;
593     }
594 }
595 
596 target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
597 {
598     if (!riscv_has_ext(env, RVH)) {
599         return 0;
600     }
601 
602     return env->geilen;
603 }
604 
605 void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
606 {
607     if (!riscv_has_ext(env, RVH)) {
608         return;
609     }
610 
611     if (geilen > (TARGET_LONG_BITS - 1)) {
612         return;
613     }
614 
615     env->geilen = geilen;
616 }
617 
618 /* This function can only be called to set virt when RVH is enabled */
619 void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
620 {
621     /* Flush the TLB on all virt mode changes. */
622     if (env->virt_enabled != enable) {
623         tlb_flush(env_cpu(env));
624     }
625 
626     env->virt_enabled = enable;
627 
628     if (enable) {
629         /*
630          * The guest external interrupts from an interrupt controller are
631          * delivered only when the Guest/VM is running (i.e. V=1). This means
632          * any guest external interrupt which is triggered while the Guest/VM
633          * is not running (i.e. V=0) will be missed by QEMU, resulting in a guest
634          * with sluggish response to serial console input and other I/O events.
635          *
636          * To solve this, we check for and inject the interrupt after setting V=1.
637          */
638         riscv_cpu_update_mip(env, 0, 0);
639     }
640 }
641 
642 int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
643 {
644     CPURISCVState *env = &cpu->env;
645     if (env->miclaim & interrupts) {
646         return -1;
647     } else {
648         env->miclaim |= interrupts;
649         return 0;
650     }
651 }
652 
653 void riscv_cpu_interrupt(CPURISCVState *env)
654 {
655     uint64_t gein, vsgein = 0, vstip = 0, irqf = 0;
656     CPUState *cs = env_cpu(env);
657 
658     BQL_LOCK_GUARD();
659 
660     if (env->virt_enabled) {
661         gein = get_field(env->hstatus, HSTATUS_VGEIN);
662         vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
663         irqf = env->hvien & env->hvip & env->vsie;
664     } else {
665         irqf = env->mvien & env->mvip & env->sie;
666     }
667 
668     vstip = env->vstime_irq ? MIP_VSTIP : 0;
669 
670     if (env->mip | vsgein | vstip | irqf) {
671         cpu_interrupt(cs, CPU_INTERRUPT_HARD);
672     } else {
673         cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
674     }
675 }
676 
677 uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask, uint64_t value)
678 {
679     uint64_t old = env->mip;
680 
681     /* No need to update mip for VSTIP */
682     mask = ((mask == MIP_VSTIP) && env->vstime_irq) ? 0 : mask;
683 
684     BQL_LOCK_GUARD();
685 
686     env->mip = (env->mip & ~mask) | (value & mask);
687 
688     riscv_cpu_interrupt(env);
689 
690     return old;
691 }
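/*
 * Typical usage sketch (illustrative; BOOL_TO_MASK is assumed to be the
 * helper that turns a boolean level into an all-ones or all-zeroes mask):
 *
 *     riscv_cpu_update_mip(env, MIP_MTIP, BOOL_TO_MASK(level));
 *
 * Callers pass the bit(s) to change in @mask and the new level in @value;
 * bits outside @mask are left untouched.
 */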
692 
693 void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
694                              void *arg)
695 {
696     env->rdtime_fn = fn;
697     env->rdtime_fn_arg = arg;
698 }
699 
700 void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
701                                    int (*rmw_fn)(void *arg,
702                                                  target_ulong reg,
703                                                  target_ulong *val,
704                                                  target_ulong new_val,
705                                                  target_ulong write_mask),
706                                    void *rmw_fn_arg)
707 {
708     if (priv <= PRV_M) {
709         env->aia_ireg_rmw_fn[priv] = rmw_fn;
710         env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg;
711     }
712 }
713 
714 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
715 {
716     g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
717 
718     if (icount_enabled() && newpriv != env->priv) {
719         riscv_itrigger_update_priv(env);
720     }
721     /* tlb_flush is unnecessary as mode is contained in mmu_idx */
722     env->priv = newpriv;
723     env->xl = cpu_recompute_xl(env);
724     riscv_cpu_update_mask(env);
725 
726     /*
727      * Clear the load reservation - otherwise a reservation placed in one
728      * context/process can be used by another, resulting in an SC succeeding
729      * incorrectly. Version 2.2 of the ISA specification explicitly requires
730      * this behaviour, while later revisions say that the kernel "should" use
731      * an SC instruction to force the yielding of a load reservation on a
732      * preemptive context switch. As a result, do both.
733      */
734     env->load_res = -1;
735 }
736 
737 /*
738  * get_physical_address_pmp - check PMP permission for this physical address
739  *
740  * Match the PMP region and check permission for this physical address and its
741  * TLB page. Returns 0 if the permission check was successful
742  *
743  * @env: CPURISCVState
744  * @prot: The returned protection attributes
745  * @addr: The physical address whose permissions are to be checked
746  * @access_type: The type of MMU access
747  * @mode: Indicates current privilege level.
748  */
749 static int get_physical_address_pmp(CPURISCVState *env, int *prot, hwaddr addr,
750                                     int size, MMUAccessType access_type,
751                                     int mode)
752 {
753     pmp_priv_t pmp_priv;
754     bool pmp_has_privs;
755 
756     if (!riscv_cpu_cfg(env)->pmp) {
757         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
758         return TRANSLATE_SUCCESS;
759     }
760 
761     pmp_has_privs = pmp_hart_has_privs(env, addr, size, 1 << access_type,
762                                        &pmp_priv, mode);
763     if (!pmp_has_privs) {
764         *prot = 0;
765         return TRANSLATE_PMP_FAIL;
766     }
767 
768     *prot = pmp_priv_to_page_prot(pmp_priv);
769 
770     return TRANSLATE_SUCCESS;
771 }
772 
773 /*
774  * get_physical_address - get the physical address for this virtual address
775  *
776  * Do a page table walk to obtain the physical address corresponding to a
777  * virtual address. Returns 0 if the translation was successful
778  *
779  * Adapted from Spike's mmu_t::translate and mmu_t::walk
780  *
781  * @env: CPURISCVState
782  * @physical: This will be set to the calculated physical address
783  * @prot: The returned protection attributes
784  * @addr: The virtual address or guest physical address to be translated
785  * @fault_pte_addr: If not NULL, this will be set to the faulting PTE address
786  *                  when an error occurs during PTE address translation.
787  *                  This will already be shifted to match htval.
788  * @access_type: The type of MMU access
789  * @mmu_idx: Indicates current privilege level
790  * @first_stage: Are we in first stage translation?
791  *               Second stage is used for hypervisor guest translation
792  * @two_stage: Are we going to perform two stage translation
793  * @is_debug: Is this access from a debugger or the monitor?
794  */
795 static int get_physical_address(CPURISCVState *env, hwaddr *physical,
796                                 int *ret_prot, vaddr addr,
797                                 target_ulong *fault_pte_addr,
798                                 int access_type, int mmu_idx,
799                                 bool first_stage, bool two_stage,
800                                 bool is_debug)
801 {
802     /*
803      * NOTE: the env->pc value visible here will not be
804      * correct, but the value visible to the exception handler
805      * (riscv_cpu_do_interrupt) is correct
806      */
807     MemTxResult res;
808     MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
809     int mode = mmuidx_priv(mmu_idx);
810     bool use_background = false;
811     hwaddr ppn;
812     int napot_bits = 0;
813     target_ulong napot_mask;
814 
815     /*
816      * Check if we should use the background registers for the two
817      * stage translation. We don't need to check if we actually need
818      * two stage translation as that happened before this function
819      * was called. Background registers will be used if the guest has
820      * forced a two stage translation to be on (in HS or M mode).
821      */
822     if (!env->virt_enabled && two_stage) {
823         use_background = true;
824     }
825 
826     if (mode == PRV_M || !riscv_cpu_cfg(env)->mmu) {
827         *physical = addr;
828         *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
829         return TRANSLATE_SUCCESS;
830     }
831 
832     *ret_prot = 0;
833 
834     hwaddr base;
835     int levels, ptidxbits, ptesize, vm, widened;
836 
837     if (first_stage == true) {
838         if (use_background) {
839             if (riscv_cpu_mxl(env) == MXL_RV32) {
840                 base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
841                 vm = get_field(env->vsatp, SATP32_MODE);
842             } else {
843                 base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
844                 vm = get_field(env->vsatp, SATP64_MODE);
845             }
846         } else {
847             if (riscv_cpu_mxl(env) == MXL_RV32) {
848                 base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
849                 vm = get_field(env->satp, SATP32_MODE);
850             } else {
851                 base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
852                 vm = get_field(env->satp, SATP64_MODE);
853             }
854         }
855         widened = 0;
856     } else {
857         if (riscv_cpu_mxl(env) == MXL_RV32) {
858             base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
859             vm = get_field(env->hgatp, SATP32_MODE);
860         } else {
861             base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
862             vm = get_field(env->hgatp, SATP64_MODE);
863         }
864         widened = 2;
865     }
866 
867     switch (vm) {
868     case VM_1_10_SV32:
869       levels = 2; ptidxbits = 10; ptesize = 4; break;
870     case VM_1_10_SV39:
871       levels = 3; ptidxbits = 9; ptesize = 8; break;
872     case VM_1_10_SV48:
873       levels = 4; ptidxbits = 9; ptesize = 8; break;
874     case VM_1_10_SV57:
875       levels = 5; ptidxbits = 9; ptesize = 8; break;
876     case VM_1_10_MBARE:
877         *physical = addr;
878         *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
879         return TRANSLATE_SUCCESS;
880     default:
881       g_assert_not_reached();
882     }
883 
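    /*
     * Worked example for the switch above: Sv39 uses levels = 3,
     * ptidxbits = 9 and ptesize = 8, so va_bits = 12 + 3 * 9 = 39 for the
     * first stage (plus 2 more for the widened G-stage, i.e. Sv39x4),
     * splitting a VA into three 9-bit VPN fields and a 12-bit page offset.
     */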
884     CPUState *cs = env_cpu(env);
885     int va_bits = PGSHIFT + levels * ptidxbits + widened;
886 
887     if (first_stage == true) {
888         target_ulong mask, masked_msbs;
889 
890         if (TARGET_LONG_BITS > (va_bits - 1)) {
891             mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
892         } else {
893             mask = 0;
894         }
895         masked_msbs = (addr >> (va_bits - 1)) & mask;
896 
897         if (masked_msbs != 0 && masked_msbs != mask) {
898             return TRANSLATE_FAIL;
899         }
900     } else {
901         if (vm != VM_1_10_SV32 && addr >> va_bits != 0) {
902             return TRANSLATE_FAIL;
903         }
904     }
905 
906     bool pbmte = env->menvcfg & MENVCFG_PBMTE;
907     bool adue = env->menvcfg & MENVCFG_ADUE;
908 
909     if (first_stage && two_stage && env->virt_enabled) {
910         pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE);
911         adue = adue && (env->henvcfg & HENVCFG_ADUE);
912     }
913 
914     int ptshift = (levels - 1) * ptidxbits;
915     target_ulong pte;
916     hwaddr pte_addr;
917     int i;
918 
919 #if !TCG_OVERSIZED_GUEST
920 restart:
921 #endif
922     for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
923         target_ulong idx;
924         if (i == 0) {
925             idx = (addr >> (PGSHIFT + ptshift)) &
926                            ((1 << (ptidxbits + widened)) - 1);
927         } else {
928             idx = (addr >> (PGSHIFT + ptshift)) &
929                            ((1 << ptidxbits) - 1);
930         }
931 
932         /* Check that the physical address of the PTE is legal */
933 
934         if (two_stage && first_stage) {
935             int vbase_prot;
936             hwaddr vbase;
937 
938             /* Do the second stage translation on the base PTE address. */
939             int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
940                                                  base, NULL, MMU_DATA_LOAD,
941                                                  MMUIdx_U, false, true,
942                                                  is_debug);
943 
944             if (vbase_ret != TRANSLATE_SUCCESS) {
945                 if (fault_pte_addr) {
946                     *fault_pte_addr = (base + idx * ptesize) >> 2;
947                 }
948                 return TRANSLATE_G_STAGE_FAIL;
949             }
950 
951             pte_addr = vbase + idx * ptesize;
952         } else {
953             pte_addr = base + idx * ptesize;
954         }
955 
956         int pmp_prot;
957         int pmp_ret = get_physical_address_pmp(env, &pmp_prot, pte_addr,
958                                                sizeof(target_ulong),
959                                                MMU_DATA_LOAD, PRV_S);
960         if (pmp_ret != TRANSLATE_SUCCESS) {
961             return TRANSLATE_PMP_FAIL;
962         }
963 
964         if (riscv_cpu_mxl(env) == MXL_RV32) {
965             pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
966         } else {
967             pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
968         }
969 
970         if (res != MEMTX_OK) {
971             return TRANSLATE_FAIL;
972         }
973 
974         if (riscv_cpu_sxl(env) == MXL_RV32) {
975             ppn = pte >> PTE_PPN_SHIFT;
976         } else {
977             if (pte & PTE_RESERVED) {
978                 return TRANSLATE_FAIL;
979             }
980 
981             if (!pbmte && (pte & PTE_PBMT)) {
982                 return TRANSLATE_FAIL;
983             }
984 
985             if (!riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
986                 return TRANSLATE_FAIL;
987             }
988 
989             ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
990         }
991 
992         if (!(pte & PTE_V)) {
993             /* Invalid PTE */
994             return TRANSLATE_FAIL;
995         }
996         if (pte & (PTE_R | PTE_W | PTE_X)) {
997             goto leaf;
998         }
999 
1000         /* Inner PTE, continue walking */
1001         if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
1002             return TRANSLATE_FAIL;
1003         }
1004         base = ppn << PGSHIFT;
1005     }
1006 
1007     /* No leaf pte at any translation level. */
1008     return TRANSLATE_FAIL;
1009 
1010  leaf:
1011     if (ppn & ((1ULL << ptshift) - 1)) {
1012         /* Misaligned PPN */
1013         return TRANSLATE_FAIL;
1014     }
1015     if (!pbmte && (pte & PTE_PBMT)) {
1016         /* Reserved without Svpbmt. */
1017         return TRANSLATE_FAIL;
1018     }
1019 
1020     /* Check for reserved combinations of RWX flags. */
1021     switch (pte & (PTE_R | PTE_W | PTE_X)) {
1022     case PTE_W:
1023     case PTE_W | PTE_X:
1024         return TRANSLATE_FAIL;
1025     }
1026 
1027     int prot = 0;
1028     if (pte & PTE_R) {
1029         prot |= PAGE_READ;
1030     }
1031     if (pte & PTE_W) {
1032         prot |= PAGE_WRITE;
1033     }
1034     if (pte & PTE_X) {
1035         bool mxr = false;
1036 
1037         /*
1038          * Use mstatus for first stage or for the second stage without
1039          * virt_enabled (MPRV+MPV)
1040          */
1041         if (first_stage || !env->virt_enabled) {
1042             mxr = get_field(env->mstatus, MSTATUS_MXR);
1043         }
1044 
1045         /* MPRV+MPV case, check VSSTATUS */
1046         if (first_stage && two_stage && !env->virt_enabled) {
1047             mxr |= get_field(env->vsstatus, MSTATUS_MXR);
1048         }
1049 
1050         /*
1051          * Setting MXR at HS-level overrides both VS-stage and G-stage
1052          * execute-only permissions
1053          */
1054         if (env->virt_enabled) {
1055             mxr |= get_field(env->mstatus_hs, MSTATUS_MXR);
1056         }
1057 
1058         if (mxr) {
1059             prot |= PAGE_READ;
1060         }
1061         prot |= PAGE_EXEC;
1062     }
1063 
1064     if (pte & PTE_U) {
1065         if (mode != PRV_U) {
1066             if (!mmuidx_sum(mmu_idx)) {
1067                 return TRANSLATE_FAIL;
1068             }
1069             /* SUM allows only read+write, not execute. */
1070             prot &= PAGE_READ | PAGE_WRITE;
1071         }
1072     } else if (mode != PRV_S) {
1073         /* Supervisor PTE flags when not S mode */
1074         return TRANSLATE_FAIL;
1075     }
1076 
1077     if (!((prot >> access_type) & 1)) {
1078         /* Access check failed */
1079         return TRANSLATE_FAIL;
1080     }
1081 
1082     /* If necessary, set accessed and dirty bits. */
1083     target_ulong updated_pte = pte | PTE_A |
1084                 (access_type == MMU_DATA_STORE ? PTE_D : 0);
1085 
1086     /* Page table updates need to be atomic with MTTCG enabled */
1087     if (updated_pte != pte && !is_debug) {
1088         if (!adue) {
1089             return TRANSLATE_FAIL;
1090         }
1091 
1092         /*
1093          * - if accessed or dirty bits need updating, and the PTE is
1094          *   in RAM, then we do so atomically with a compare and swap.
1095          * - if the PTE is in IO space or ROM, then it can't be updated
1096          *   and we return TRANSLATE_FAIL.
1097          * - if the PTE changed by the time we went to update it, then
1098          *   it is no longer valid and we must re-walk the page table.
1099          */
1100         MemoryRegion *mr;
1101         hwaddr l = sizeof(target_ulong), addr1;
1102         mr = address_space_translate(cs->as, pte_addr, &addr1, &l,
1103                                      false, MEMTXATTRS_UNSPECIFIED);
1104         if (memory_region_is_ram(mr)) {
1105             target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1);
1106 #if TCG_OVERSIZED_GUEST
1107             /*
1108              * MTTCG is not enabled on oversized TCG guests so
1109              * page table updates do not need to be atomic
1110              */
1111             *pte_pa = pte = updated_pte;
1112 #else
1113             target_ulong old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte);
1114             if (old_pte != pte) {
1115                 goto restart;
1116             }
1117             pte = updated_pte;
1118 #endif
1119         } else {
1120             /*
1121              * Misconfigured PTE in ROM (AD bits are not preset) or
1122              * PTE is in IO space and can't be updated atomically.
1123              */
1124             return TRANSLATE_FAIL;
1125         }
1126     }
1127 
1128     /* For superpage mappings, make a fake leaf PTE for the TLB's benefit. */
1129     target_ulong vpn = addr >> PGSHIFT;
1130 
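    /*
     * NAPOT sketch (Svnapot): a valid NAPOT leaf describes a 64 KiB
     * naturally aligned region with pte.ppn[3:0] = 0b1000, so
     * ctzl(ppn) + 1 == 4 below and the low napot_bits of the PPN are
     * taken from the VA when the physical address is assembled.
     */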
1131     if (riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
1132         napot_bits = ctzl(ppn) + 1;
1133         if ((i != (levels - 1)) || (napot_bits != 4)) {
1134             return TRANSLATE_FAIL;
1135         }
1136     }
1137 
1138     napot_mask = (1 << napot_bits) - 1;
1139     *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
1140                   (vpn & (((target_ulong)1 << ptshift) - 1))
1141                  ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);
1142 
1143     /*
1144      * Remove write permission unless this is a store, or the page is
1145      * already dirty, so that we TLB miss on later writes to update
1146      * the dirty bit.
1147      */
1148     if (access_type != MMU_DATA_STORE && !(pte & PTE_D)) {
1149         prot &= ~PAGE_WRITE;
1150     }
1151     *ret_prot = prot;
1152 
1153     return TRANSLATE_SUCCESS;
1154 }
1155 
1156 static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
1157                                 MMUAccessType access_type, bool pmp_violation,
1158                                 bool first_stage, bool two_stage,
1159                                 bool two_stage_indirect)
1160 {
1161     CPUState *cs = env_cpu(env);
1162 
1163     switch (access_type) {
1164     case MMU_INST_FETCH:
1165         if (env->virt_enabled && !first_stage) {
1166             cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
1167         } else {
1168             cs->exception_index = pmp_violation ?
1169                 RISCV_EXCP_INST_ACCESS_FAULT : RISCV_EXCP_INST_PAGE_FAULT;
1170         }
1171         break;
1172     case MMU_DATA_LOAD:
1173         if (two_stage && !first_stage) {
1174             cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
1175         } else {
1176             cs->exception_index = pmp_violation ?
1177                 RISCV_EXCP_LOAD_ACCESS_FAULT : RISCV_EXCP_LOAD_PAGE_FAULT;
1178         }
1179         break;
1180     case MMU_DATA_STORE:
1181         if (two_stage && !first_stage) {
1182             cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
1183         } else {
1184             cs->exception_index = pmp_violation ?
1185                 RISCV_EXCP_STORE_AMO_ACCESS_FAULT :
1186                 RISCV_EXCP_STORE_PAGE_FAULT;
1187         }
1188         break;
1189     default:
1190         g_assert_not_reached();
1191     }
1192     env->badaddr = address;
1193     env->two_stage_lookup = two_stage;
1194     env->two_stage_indirect_lookup = two_stage_indirect;
1195 }
1196 
1197 hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
1198 {
1199     RISCVCPU *cpu = RISCV_CPU(cs);
1200     CPURISCVState *env = &cpu->env;
1201     hwaddr phys_addr;
1202     int prot;
1203     int mmu_idx = riscv_env_mmu_index(&cpu->env, false);
1204 
1205     if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
1206                              true, env->virt_enabled, true)) {
1207         return -1;
1208     }
1209 
1210     if (env->virt_enabled) {
1211         if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
1212                                  0, mmu_idx, false, true, true)) {
1213             return -1;
1214         }
1215     }
1216 
1217     return phys_addr & TARGET_PAGE_MASK;
1218 }
1219 
1220 void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
1221                                      vaddr addr, unsigned size,
1222                                      MMUAccessType access_type,
1223                                      int mmu_idx, MemTxAttrs attrs,
1224                                      MemTxResult response, uintptr_t retaddr)
1225 {
1226     RISCVCPU *cpu = RISCV_CPU(cs);
1227     CPURISCVState *env = &cpu->env;
1228 
1229     if (access_type == MMU_DATA_STORE) {
1230         cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1231     } else if (access_type == MMU_DATA_LOAD) {
1232         cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1233     } else {
1234         cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
1235     }
1236 
1237     env->badaddr = addr;
1238     env->two_stage_lookup = mmuidx_2stage(mmu_idx);
1239     env->two_stage_indirect_lookup = false;
1240     cpu_loop_exit_restore(cs, retaddr);
1241 }
1242 
1243 void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
1244                                    MMUAccessType access_type, int mmu_idx,
1245                                    uintptr_t retaddr)
1246 {
1247     RISCVCPU *cpu = RISCV_CPU(cs);
1248     CPURISCVState *env = &cpu->env;
1249     switch (access_type) {
1250     case MMU_INST_FETCH:
1251         cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
1252         break;
1253     case MMU_DATA_LOAD:
1254         cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
1255         break;
1256     case MMU_DATA_STORE:
1257         cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
1258         break;
1259     default:
1260         g_assert_not_reached();
1261     }
1262     env->badaddr = addr;
1263     env->two_stage_lookup = mmuidx_2stage(mmu_idx);
1264     env->two_stage_indirect_lookup = false;
1265     cpu_loop_exit_restore(cs, retaddr);
1266 }
1267 
1268 
1269 static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type)
1270 {
1271     enum riscv_pmu_event_idx pmu_event_type;
1272 
1273     switch (access_type) {
1274     case MMU_INST_FETCH:
1275         pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
1276         break;
1277     case MMU_DATA_LOAD:
1278         pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS;
1279         break;
1280     case MMU_DATA_STORE:
1281         pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
1282         break;
1283     default:
1284         return;
1285     }
1286 
1287     riscv_pmu_incr_ctr(cpu, pmu_event_type);
1288 }
1289 
1290 bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
1291                         MMUAccessType access_type, int mmu_idx,
1292                         bool probe, uintptr_t retaddr)
1293 {
1294     RISCVCPU *cpu = RISCV_CPU(cs);
1295     CPURISCVState *env = &cpu->env;
1296     vaddr im_address;
1297     hwaddr pa = 0;
1298     int prot, prot2, prot_pmp;
1299     bool pmp_violation = false;
1300     bool first_stage_error = true;
1301     bool two_stage_lookup = mmuidx_2stage(mmu_idx);
1302     bool two_stage_indirect_error = false;
1303     int ret = TRANSLATE_FAIL;
1304     int mode = mmu_idx;
1305     /* default TLB page size */
1306     target_ulong tlb_size = TARGET_PAGE_SIZE;
1307 
1308     env->guest_phys_fault_addr = 0;
1309 
1310     qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
1311                   __func__, address, access_type, mmu_idx);
1312 
1313     pmu_tlb_fill_incr_ctr(cpu, access_type);
1314     if (two_stage_lookup) {
1315         /* Two stage lookup */
1316         ret = get_physical_address(env, &pa, &prot, address,
1317                                    &env->guest_phys_fault_addr, access_type,
1318                                    mmu_idx, true, true, false);
1319 
1320         /*
1321          * A G-stage exception may be triggered during two stage lookup.
1322          * And the env->guest_phys_fault_addr has already been set in
1323          * get_physical_address().
1324          */
1325         if (ret == TRANSLATE_G_STAGE_FAIL) {
1326             first_stage_error = false;
1327             two_stage_indirect_error = true;
1328         }
1329 
1330         qemu_log_mask(CPU_LOG_MMU,
1331                       "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
1332                       HWADDR_FMT_plx " prot %d\n",
1333                       __func__, address, ret, pa, prot);
1334 
1335         if (ret == TRANSLATE_SUCCESS) {
1336             /* Second stage lookup */
1337             im_address = pa;
1338 
1339             ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
1340                                        access_type, MMUIdx_U, false, true,
1341                                        false);
1342 
1343             qemu_log_mask(CPU_LOG_MMU,
1344                           "%s 2nd-stage address=%" VADDR_PRIx
1345                           " ret %d physical "
1346                           HWADDR_FMT_plx " prot %d\n",
1347                           __func__, im_address, ret, pa, prot2);
1348 
1349             prot &= prot2;
1350 
1351             if (ret == TRANSLATE_SUCCESS) {
1352                 ret = get_physical_address_pmp(env, &prot_pmp, pa,
1353                                                size, access_type, mode);
1354                 tlb_size = pmp_get_tlb_size(env, pa);
1355 
1356                 qemu_log_mask(CPU_LOG_MMU,
1357                               "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
1358                               " %d tlb_size " TARGET_FMT_lu "\n",
1359                               __func__, pa, ret, prot_pmp, tlb_size);
1360 
1361                 prot &= prot_pmp;
1362             }
1363 
1364             if (ret != TRANSLATE_SUCCESS) {
1365                 /*
1366                  * Guest physical address translation failed, this is a HS
1367                  * level exception
1368                  */
1369                 first_stage_error = false;
1370                 env->guest_phys_fault_addr = (im_address |
1371                                               (address &
1372                                                (TARGET_PAGE_SIZE - 1))) >> 2;
1373             }
1374         }
1375     } else {
1376         /* Single stage lookup */
1377         ret = get_physical_address(env, &pa, &prot, address, NULL,
1378                                    access_type, mmu_idx, true, false, false);
1379 
1380         qemu_log_mask(CPU_LOG_MMU,
1381                       "%s address=%" VADDR_PRIx " ret %d physical "
1382                       HWADDR_FMT_plx " prot %d\n",
1383                       __func__, address, ret, pa, prot);
1384 
1385         if (ret == TRANSLATE_SUCCESS) {
1386             ret = get_physical_address_pmp(env, &prot_pmp, pa,
1387                                            size, access_type, mode);
1388             tlb_size = pmp_get_tlb_size(env, pa);
1389 
1390             qemu_log_mask(CPU_LOG_MMU,
1391                           "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
1392                           " %d tlb_size " TARGET_FMT_lu "\n",
1393                           __func__, pa, ret, prot_pmp, tlb_size);
1394 
1395             prot &= prot_pmp;
1396         }
1397     }
1398 
1399     if (ret == TRANSLATE_PMP_FAIL) {
1400         pmp_violation = true;
1401     }
1402 
1403     if (ret == TRANSLATE_SUCCESS) {
1404         tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
1405                      prot, mmu_idx, tlb_size);
1406         return true;
1407     } else if (probe) {
1408         return false;
1409     } else {
1410         raise_mmu_exception(env, address, access_type, pmp_violation,
1411                             first_stage_error, two_stage_lookup,
1412                             two_stage_indirect_error);
1413         cpu_loop_exit_restore(cs, retaddr);
1414     }
1415 
1416     return true;
1417 }
1418 
1419 static target_ulong riscv_transformed_insn(CPURISCVState *env,
1420                                            target_ulong insn,
1421                                            target_ulong taddr)
1422 {
1423     target_ulong xinsn = 0;
1424     target_ulong access_rs1 = 0, access_imm = 0, access_size = 0;
1425 
1426     /*
1427      * Only Quadrant 0 and Quadrant 2 of the RVC instruction space need to
1428      * be uncompressed. Quadrant 1 of the RVC instruction space need not
1429      * be transformed because those instructions won't generate any
1430      * load/store trap.
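     *
     * Illustrative result (a sketch of what the code below produces): a
     * faulting C.LW is reported as an LW with rd taken from rs2', a zeroed
     * immediate and bit 1 cleared to mark the compressed origin; the rs1
     * field of the transformed instruction is later overwritten, at the
     * end of this function, with the offset of the faulting address within
     * the access, i.e. (taddr - (rs1 + imm)) masked to the access size.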
1431      */
1432 
1433     if ((insn & 0x3) != 0x3) {
1434         /* Transform 16bit instruction into 32bit instruction */
1435         switch (GET_C_OP(insn)) {
1436         case OPC_RISC_C_OP_QUAD0: /* Quadrant 0 */
1437             switch (GET_C_FUNC(insn)) {
1438             case OPC_RISC_C_FUNC_FLD_LQ:
1439                 if (riscv_cpu_xlen(env) != 128) { /* C.FLD (RV32/64) */
1440                     xinsn = OPC_RISC_FLD;
1441                     xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
1442                     access_rs1 = GET_C_RS1S(insn);
1443                     access_imm = GET_C_LD_IMM(insn);
1444                     access_size = 8;
1445                 }
1446                 break;
1447             case OPC_RISC_C_FUNC_LW: /* C.LW */
1448                 xinsn = OPC_RISC_LW;
1449                 xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
1450                 access_rs1 = GET_C_RS1S(insn);
1451                 access_imm = GET_C_LW_IMM(insn);
1452                 access_size = 4;
1453                 break;
1454             case OPC_RISC_C_FUNC_FLW_LD:
1455                 if (riscv_cpu_xlen(env) == 32) { /* C.FLW (RV32) */
1456                     xinsn = OPC_RISC_FLW;
1457                     xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
1458                     access_rs1 = GET_C_RS1S(insn);
1459                     access_imm = GET_C_LW_IMM(insn);
1460                     access_size = 4;
1461                 } else { /* C.LD (RV64/RV128) */
1462                     xinsn = OPC_RISC_LD;
1463                     xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
1464                     access_rs1 = GET_C_RS1S(insn);
1465                     access_imm = GET_C_LD_IMM(insn);
1466                     access_size = 8;
1467                 }
1468                 break;
1469             case OPC_RISC_C_FUNC_FSD_SQ:
1470                 if (riscv_cpu_xlen(env) != 128) { /* C.FSD (RV32/64) */
1471                     xinsn = OPC_RISC_FSD;
1472                     xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
1473                     access_rs1 = GET_C_RS1S(insn);
1474                     access_imm = GET_C_SD_IMM(insn);
1475                     access_size = 8;
1476                 }
1477                 break;
1478             case OPC_RISC_C_FUNC_SW: /* C.SW */
1479                 xinsn = OPC_RISC_SW;
1480                 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
1481                 access_rs1 = GET_C_RS1S(insn);
1482                 access_imm = GET_C_SW_IMM(insn);
1483                 access_size = 4;
1484                 break;
1485             case OPC_RISC_C_FUNC_FSW_SD:
1486                 if (riscv_cpu_xlen(env) == 32) { /* C.FSW (RV32) */
1487                     xinsn = OPC_RISC_FSW;
1488                     xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
1489                     access_rs1 = GET_C_RS1S(insn);
1490                     access_imm = GET_C_SW_IMM(insn);
1491                     access_size = 4;
1492                 } else { /* C.SD (RV64/RV128) */
1493                     xinsn = OPC_RISC_SD;
1494                     xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
1495                     access_rs1 = GET_C_RS1S(insn);
1496                     access_imm = GET_C_SD_IMM(insn);
1497                     access_size = 8;
1498                 }
1499                 break;
1500             default:
1501                 break;
1502             }
1503             break;
1504         case OPC_RISC_C_OP_QUAD2: /* Quadrant 2 */
1505             switch (GET_C_FUNC(insn)) {
1506             case OPC_RISC_C_FUNC_FLDSP_LQSP:
1507                 if (riscv_cpu_xlen(env) != 128) { /* C.FLDSP (RV32/64) */
1508                     xinsn = OPC_RISC_FLD;
1509                     xinsn = SET_RD(xinsn, GET_C_RD(insn));
1510                     access_rs1 = 2;
1511                     access_imm = GET_C_LDSP_IMM(insn);
1512                     access_size = 8;
1513                 }
1514                 break;
1515             case OPC_RISC_C_FUNC_LWSP: /* C.LWSP */
1516                 xinsn = OPC_RISC_LW;
1517                 xinsn = SET_RD(xinsn, GET_C_RD(insn));
1518                 access_rs1 = 2;
1519                 access_imm = GET_C_LWSP_IMM(insn);
1520                 access_size = 4;
1521                 break;
1522             case OPC_RISC_C_FUNC_FLWSP_LDSP:
1523                 if (riscv_cpu_xlen(env) == 32) { /* C.FLWSP (RV32) */
1524                     xinsn = OPC_RISC_FLW;
1525                     xinsn = SET_RD(xinsn, GET_C_RD(insn));
1526                     access_rs1 = 2;
1527                     access_imm = GET_C_LWSP_IMM(insn);
1528                     access_size = 4;
1529                 } else { /* C.LDSP (RV64/RV128) */
1530                     xinsn = OPC_RISC_LD;
1531                     xinsn = SET_RD(xinsn, GET_C_RD(insn));
1532                     access_rs1 = 2;
1533                     access_imm = GET_C_LDSP_IMM(insn);
1534                     access_size = 8;
1535                 }
1536                 break;
1537             case OPC_RISC_C_FUNC_FSDSP_SQSP:
1538                 if (riscv_cpu_xlen(env) != 128) { /* C.FSDSP (RV32/64) */
1539                     xinsn = OPC_RISC_FSD;
1540                     xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
1541                     access_rs1 = 2;
1542                     access_imm = GET_C_SDSP_IMM(insn);
1543                     access_size = 8;
1544                 }
1545                 break;
1546             case OPC_RISC_C_FUNC_SWSP: /* C.SWSP */
1547                 xinsn = OPC_RISC_SW;
1548                 xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
1549                 access_rs1 = 2;
1550                 access_imm = GET_C_SWSP_IMM(insn);
1551                 access_size = 4;
1552                 break;
1553             case 7: /* C.FSWSP / C.SDSP */
1554                 if (riscv_cpu_xlen(env) == 32) { /* C.FSWSP (RV32) */
1555                     xinsn = OPC_RISC_FSW;
1556                     xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
1557                     access_rs1 = 2;
1558                     access_imm = GET_C_SWSP_IMM(insn);
1559                     access_size = 4;
1560                 } else { /* C.SDSP (RV64/RV128) */
1561                     xinsn = OPC_RISC_SD;
1562                     xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
1563                     access_rs1 = 2;
1564                     access_imm = GET_C_SDSP_IMM(insn);
1565                     access_size = 8;
1566                 }
1567                 break;
1568             default:
1569                 break;
1570             }
1571             break;
1572         default:
1573             break;
1574         }
1575 
1576         /*
1577          * Clear Bit1 of transformed instruction to indicate that
1578          * original insruction was a 16bit instruction
1579          */
1580         xinsn &= ~((target_ulong)0x2);
1581     } else {
1582         /* Transform 32bit (or wider) instructions */
1583         switch (MASK_OP_MAJOR(insn)) {
1584         case OPC_RISC_ATOMIC:
1585             xinsn = insn;
1586             access_rs1 = GET_RS1(insn);
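                 /*
                  * For AMO/LR/SC, funct3 is log2 of the access width
                  * (0b010 -> 4 bytes, 0b011 -> 8 bytes).
                  */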
1587             access_size = 1 << GET_FUNCT3(insn);
1588             break;
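             /*
              * For loads and stores, keep the original instruction but clear
              * the offset immediate; rs1 is overwritten with the
              * "Addr. Offset" at the end of this function.
              */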
1589         case OPC_RISC_LOAD:
1590         case OPC_RISC_FP_LOAD:
1591             xinsn = SET_I_IMM(insn, 0);
1592             access_rs1 = GET_RS1(insn);
1593             access_imm = GET_IMM(insn);
1594             access_size = 1 << GET_FUNCT3(insn);
1595             break;
1596         case OPC_RISC_STORE:
1597         case OPC_RISC_FP_STORE:
1598             xinsn = SET_S_IMM(insn, 0);
1599             access_rs1 = GET_RS1(insn);
1600             access_imm = GET_STORE_IMM(insn);
1601             access_size = 1 << GET_FUNCT3(insn);
1602             break;
1603         case OPC_RISC_SYSTEM:
1604             if (MASK_OP_SYSTEM(insn) == OPC_RISC_HLVHSV) {
1605                 xinsn = insn;
1606                 access_rs1 = GET_RS1(insn);
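                     /*
                      * For HLV/HLVX/HSV, bits [2:1] of funct7 give log2 of the
                      * access width (e.g. HLV.B -> 1 byte, HLV.D -> 8 bytes).
                      */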
1607                 access_size = (GET_FUNCT7(insn) >> 1) & 0x3;
1608                 access_size = 1 << access_size;
1609             }
1610             break;
1611         default:
1612             break;
1613         }
1614     }
1615 
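     /*
      * Put the offset of the faulting address from the start of the access
      * into the rs1 field ("Addr. Offset"); it can only be non-zero for a
      * misaligned access.
      */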
1616     if (access_size) {
1617         xinsn = SET_RS1(xinsn, (taddr - (env->gpr[access_rs1] + access_imm)) &
1618                                (access_size - 1));
1619     }
1620 
1621     return xinsn;
1622 }
1623 #endif /* !CONFIG_USER_ONLY */
1624 
1625 /*
1626  * Handle Traps
1627  *
1628  * Adapted from Spike's processor_t::take_trap.
1629  *
1630  */
1631 void riscv_cpu_do_interrupt(CPUState *cs)
1632 {
1633 #if !defined(CONFIG_USER_ONLY)
1634 
1635     RISCVCPU *cpu = RISCV_CPU(cs);
1636     CPURISCVState *env = &cpu->env;
1637     bool write_gva = false;
1638     uint64_t s;
1639 
1640     /*
1641      * cs->exception_index is 32 bits wide, unlike mcause which is XLEN bits
1642      * wide, so we mask off the MSB and separate into trap type and cause.
1643      */
1644     bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
1645     target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
1646     uint64_t deleg = async ? env->mideleg : env->medeleg;
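     /*
      * With AIA, an interrupt may be injected for S-mode via mvip/mvien (or
      * for VS-mode via hvip/hvien) without being pending in mip; such
      * interrupts must still be routed to S-mode (VS-mode) below.
      */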
1647     bool s_injected = env->mvip & (1ULL << cause) & env->mvien &&
1648         !(env->mip & (1ULL << cause));
1649     bool vs_injected = env->hvip & (1ULL << cause) & env->hvien &&
1650         !(env->mip & (1ULL << cause));
1651     target_ulong tval = 0;
1652     target_ulong tinst = 0;
1653     target_ulong htval = 0;
1654     target_ulong mtval2 = 0;
1655 
1656     if (!async) {
1657         /* set tval to badaddr for traps with address information */
1658         switch (cause) {
1659         case RISCV_EXCP_SEMIHOST:
1660             do_common_semihosting(cs);
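                 /* Resume after the (uncompressed) ebreak that raised the trap. */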
1661             env->pc += 4;
1662             return;
1663         case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
1664         case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
1665         case RISCV_EXCP_LOAD_ADDR_MIS:
1666         case RISCV_EXCP_STORE_AMO_ADDR_MIS:
1667         case RISCV_EXCP_LOAD_ACCESS_FAULT:
1668         case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
1669         case RISCV_EXCP_LOAD_PAGE_FAULT:
1670         case RISCV_EXCP_STORE_PAGE_FAULT:
1671             write_gva = env->two_stage_lookup;
1672             tval = env->badaddr;
1673             if (env->two_stage_indirect_lookup) {
1674                 /*
1675                  * special pseudoinstruction for G-stage fault taken while
1676                  * doing VS-stage page table walk.
1677                  */
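                     /*
                      * Sv32 PTEs are 32 bits wide, Sv39/48/57 PTEs are 64 bits
                      * wide, hence the choice based on xlen.
                      */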
1678                 tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
1679             } else {
1680                 /*
1681                  * The "Addr. Offset" field in transformed instruction is
1682                  * non-zero only for misaligned access.
1683                  */
1684                 tinst = riscv_transformed_insn(env, env->bins, tval);
1685             }
1686             break;
1687         case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
1688         case RISCV_EXCP_INST_ADDR_MIS:
1689         case RISCV_EXCP_INST_ACCESS_FAULT:
1690         case RISCV_EXCP_INST_PAGE_FAULT:
1691             write_gva = env->two_stage_lookup;
1692             tval = env->badaddr;
1693             if (env->two_stage_indirect_lookup) {
1694                 /*
1695                  * special pseudoinstruction for G-stage fault taken while
1696                  * doing VS-stage page table walk.
1697                  */
1698                 tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
1699             }
1700             break;
1701         case RISCV_EXCP_ILLEGAL_INST:
1702         case RISCV_EXCP_VIRT_INSTRUCTION_FAULT:
1703             tval = env->bins;
1704             break;
1705         case RISCV_EXCP_BREAKPOINT:
1706             if (cs->watchpoint_hit) {
1707                 tval = cs->watchpoint_hit->hitaddr;
1708                 cs->watchpoint_hit = NULL;
1709             }
1710             break;
1711         default:
1712             break;
1713         }
1714         /* ecall is dispatched as one cause so translate based on mode */
1715         if (cause == RISCV_EXCP_U_ECALL) {
1716             assert(env->priv <= 3);
1717 
1718             if (env->priv == PRV_M) {
1719                 cause = RISCV_EXCP_M_ECALL;
1720             } else if (env->priv == PRV_S && env->virt_enabled) {
1721                 cause = RISCV_EXCP_VS_ECALL;
1722             } else if (env->priv == PRV_S && !env->virt_enabled) {
1723                 cause = RISCV_EXCP_S_ECALL;
1724             } else if (env->priv == PRV_U) {
1725                 cause = RISCV_EXCP_U_ECALL;
1726             }
1727         }
1728     }
1729 
1730     trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
1731                      riscv_cpu_get_trap_name(cause, async));
1732 
1733     qemu_log_mask(CPU_LOG_INT,
1734                   "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
1735                   "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
1736                   __func__, env->mhartid, async, cause, env->pc, tval,
1737                   riscv_cpu_get_trap_name(cause, async));
1738 
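     /*
      * Take the trap in S-mode when it was raised in U/S-mode and is either
      * delegated via medeleg/mideleg or injected via mvien/hvien; everything
      * else is handled in M-mode below. Only causes below 64 can be delegated.
      */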
1739     if (env->priv <= PRV_S && cause < 64 &&
1740         (((deleg >> cause) & 1) || s_injected || vs_injected)) {
1741         /* handle the trap in S-mode */
1742         if (riscv_has_ext(env, RVH)) {
1743             uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
1744 
1745             if (env->virt_enabled &&
1746                 (((hdeleg >> cause) & 1) || vs_injected)) {
1747                 /* Trap to VS mode */
1748                 /*
1749                  * See if we need to adjust cause: yes if it is a VS-mode interrupt,
1750                  * no if the hypervisor has delegated one of HS mode's interrupts.
1751                  */
1752                 if (async && (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
1753                               cause == IRQ_VS_EXT)) {
1754                     cause = cause - 1; /* VS-level cause is S-level cause + 1 */
1755                 }
1756                 write_gva = false;
1757             } else if (env->virt_enabled) {
1758                 /* Trap into HS mode, from virt */
1759                 riscv_cpu_swap_hypervisor_regs(env);
1760                 env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
1761                                          env->priv);
1762                 env->hstatus = set_field(env->hstatus, HSTATUS_SPV, true);
1763 
1764                 htval = env->guest_phys_fault_addr;
1765 
1766                 riscv_cpu_set_virt_enabled(env, 0);
1767             } else {
1768                 /* Trap into HS mode */
1769                 env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
1770                 htval = env->guest_phys_fault_addr;
1771             }
1772             env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
1773         }
1774 
1775         s = env->mstatus;
1776         s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
1777         s = set_field(s, MSTATUS_SPP, env->priv);
1778         s = set_field(s, MSTATUS_SIE, 0);
1779         env->mstatus = s;
1780         env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
1781         env->sepc = env->pc;
1782         env->stval = tval;
1783         env->htval = htval;
1784         env->htinst = tinst;
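             /*
              * stvec mode 1 (vectored): asynchronous traps jump to
              * BASE + 4 * cause; otherwise the handler is entered at BASE.
              */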
1785         env->pc = (env->stvec >> 2 << 2) +
1786                   ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
1787         riscv_cpu_set_mode(env, PRV_S);
1788     } else {
1789         /* handle the trap in M-mode */
1790         if (riscv_has_ext(env, RVH)) {
1791             if (env->virt_enabled) {
1792                 riscv_cpu_swap_hypervisor_regs(env);
1793             }
1794             env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
1795                                      env->virt_enabled);
1796             if (env->virt_enabled && tval) {
1797                 env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
1798             }
1799 
1800             mtval2 = env->guest_phys_fault_addr;
1801 
1802             /* Trapping to M mode, virt is disabled */
1803             riscv_cpu_set_virt_enabled(env, 0);
1804         }
1805 
1806         s = env->mstatus;
1807         s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
1808         s = set_field(s, MSTATUS_MPP, env->priv);
1809         s = set_field(s, MSTATUS_MIE, 0);
1810         env->mstatus = s;
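             /*
              * ~(all-ones >> async) is 0 for a synchronous trap and has only
              * the MSB set for an interrupt, which sets mcause's Interrupt bit.
              */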
1811         env->mcause = cause | ~(((target_ulong)-1) >> async);
1812         env->mepc = env->pc;
1813         env->mtval = tval;
1814         env->mtval2 = mtval2;
1815         env->mtinst = tinst;
1816         env->pc = (env->mtvec >> 2 << 2) +
1817                   ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
1818         riscv_cpu_set_mode(env, PRV_M);
1819     }
1820 
1821     /*
1822      * NOTE: it is not necessary to yield load reservations here. It is only
1823      * necessary for an SC from "another hart" to cause a load reservation
1824      * to be yielded. Refer to the memory consistency model section of the
1825      * RISC-V ISA Specification.
1826      */
1827 
1828     env->two_stage_lookup = false;
1829     env->two_stage_indirect_lookup = false;
1830 #endif
1831     cs->exception_index = RISCV_EXCP_NONE; /* tell QEMU the trap is handled */
1832 }
1833