xref: /openbmc/qemu/target/riscv/cpu_helper.c (revision 487a9955)
/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "trace.h"
#include "semihosting/common-semi.h"

int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}

static RISCVMXL cpu_get_xl(CPURISCVState *env)
{
#if defined(TARGET_RISCV32)
    return MXL_RV32;
#elif defined(CONFIG_USER_ONLY)
    return MXL_RV64;
#else
    RISCVMXL xl = riscv_cpu_mxl(env);

    /*
     * When emulating a 32-bit-only cpu, use RV32.
     * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
     * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
     * back to RV64 for lower privs.
     */
    if (xl != MXL_RV32) {
        switch (env->priv) {
        case PRV_M:
            break;
        case PRV_U:
            xl = get_field(env->mstatus, MSTATUS64_UXL);
            break;
        default: /* PRV_S | PRV_H */
            xl = get_field(env->mstatus, MSTATUS64_SXL);
            break;
        }
    }
    return xl;
#endif
}

void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    uint32_t flags = 0;

    *pc = env->pc;
    *cs_base = 0;

    if (riscv_has_ext(env, RVV)) {
        uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl);
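        /*
         * Note: vl_eq_vlmax goes into the TB flags so the translator can
         * pick a simpler code path when the operation covers the whole
         * register group (vstart == 0 and vl == vlmax).
         */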
        flags = FIELD_DP32(flags, TB_FLAGS, VILL,
                    FIELD_EX64(env->vtype, VTYPE, VILL));
        flags = FIELD_DP32(flags, TB_FLAGS, SEW,
                    FIELD_EX64(env->vtype, VTYPE, VSEW));
        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                    FIELD_EX64(env->vtype, VTYPE, VLMUL));
        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
    } else {
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
    }

#ifdef CONFIG_USER_ONLY
    flags |= TB_FLAGS_MSTATUS_FS;
#else
    flags |= cpu_mmu_index(env, 0);
    if (riscv_cpu_fp_enabled(env)) {
        flags |= env->mstatus & MSTATUS_FS;
    }

    if (riscv_has_ext(env, RVH)) {
        if (env->priv == PRV_M ||
            (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
            (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
                get_field(env->hstatus, HSTATUS_HU))) {
            flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1);
        }

        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_FS,
                           get_field(env->mstatus_hs, MSTATUS_FS));
    }
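    /*
     * RVJ pointer masking: env->mmte holds a separate enable bit per
     * privilege level, so select the one matching the translation's
     * mmu_idx.
     */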
    if (riscv_has_ext(env, RVJ)) {
        int priv = flags & TB_FLAGS_PRIV_MMU_MASK;
        bool pm_enabled = false;
        switch (priv) {
        case PRV_U:
            pm_enabled = env->mmte & U_PM_ENABLE;
            break;
        case PRV_S:
            pm_enabled = env->mmte & S_PM_ENABLE;
            break;
        case PRV_M:
            pm_enabled = env->mmte & M_PM_ENABLE;
            break;
        default:
            g_assert_not_reached();
        }
        flags = FIELD_DP32(flags, TB_FLAGS, PM_ENABLED, pm_enabled);
    }
#endif

    flags = FIELD_DP32(flags, TB_FLAGS, XL, cpu_get_xl(env));

    *pflags = flags;
}

#ifndef CONFIG_USER_ONLY
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    target_ulong virt_enabled = riscv_cpu_virt_enabled(env);

    target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
    target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);

    target_ulong pending = env->mip & env->mie;

    target_ulong mie    = env->priv < PRV_M ||
                          (env->priv == PRV_M && mstatus_mie);
    target_ulong sie    = env->priv < PRV_S ||
                          (env->priv == PRV_S && mstatus_sie);
    target_ulong hsie   = virt_enabled || sie;
    target_ulong vsie   = virt_enabled && sie;

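    /*
     * mie, hsie and vsie above are 0/1 truth values, so negating them (-x)
     * yields an all-zeros or all-ones mask: each term below keeps only the
     * pending interrupts that are delegated to and enabled at that level.
     */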
    target_ulong irqs =
            (pending & ~env->mideleg & -mie) |
            (pending &  env->mideleg & ~env->hideleg & -hsie) |
            (pending &  env->mideleg &  env->hideleg & -vsie);

    if (irqs) {
        return ctz64(irqs); /* since non-zero */
    } else {
        return RISCV_EXCP_NONE; /* indicates no pending interrupt */
    }
}

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
            return false;
        }
        return true;
    }

    return false;
}

void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
    uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS |
                            MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
                            MSTATUS64_UXL;
    bool current_virt = riscv_cpu_virt_enabled(env);

    g_assert(riscv_has_ext(env, RVH));

    if (current_virt) {
        /* Current V=1 and we are about to change to V=0 */
        env->vsstatus = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->mstatus_hs;

        env->vstvec = env->stvec;
        env->stvec = env->stvec_hs;

        env->vsscratch = env->sscratch;
        env->sscratch = env->sscratch_hs;

        env->vsepc = env->sepc;
        env->sepc = env->sepc_hs;

        env->vscause = env->scause;
        env->scause = env->scause_hs;

        env->vstval = env->stval;
        env->stval = env->stval_hs;

        env->vsatp = env->satp;
        env->satp = env->satp_hs;
    } else {
        /* Current V=0 and we are about to change to V=1 */
        env->mstatus_hs = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->vsstatus;

        env->stvec_hs = env->stvec;
        env->stvec = env->vstvec;

        env->sscratch_hs = env->sscratch;
        env->sscratch = env->vsscratch;

        env->sepc_hs = env->sepc;
        env->sepc = env->vsepc;

        env->scause_hs = env->scause;
        env->scause = env->vscause;

        env->stval_hs = env->stval;
        env->stval = env->vstval;

        env->satp_hs = env->satp;
        env->satp = env->vsatp;
    }
}

bool riscv_cpu_virt_enabled(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return false;
    }

    return get_field(env->virt, VIRT_ONOFF);
}

void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    /* Flush the TLB on all virt mode changes. */
    if (get_field(env->virt, VIRT_ONOFF) != enable) {
        tlb_flush(env_cpu(env));
    }

    env->virt = set_field(env->virt, VIRT_ONOFF, enable);
}

bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return false;
    }

    return get_field(env->virt, FORCE_HS_EXCEP);
}

void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    env->virt = set_field(env->virt, FORCE_HS_EXCEP, enable);
}

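/*
 * The mmu_idx carries TB_FLAGS_PRIV_HYP_ACCESS_MASK when the access comes
 * from a hypervisor virtual-machine load/store instruction (HLV, HLVX or
 * HSV), which always uses two-stage translation regardless of CPU mode.
 */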
bool riscv_cpu_two_stage_lookup(int mmu_idx)
{
    return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK;
}

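/*
 * Mark the given mip bits as claimed, e.g. by an interrupt controller
 * model; returns -1 without claiming anything if any of the bits was
 * already claimed.
 */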
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}

uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
{
    CPURISCVState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t old = env->mip;
    bool locked = false;

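    /*
     * cpu_interrupt()/cpu_reset_interrupt() must be called with the
     * iothread (BQL) lock held, so take it here if the caller did not.
     */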
    if (!qemu_mutex_iothread_locked()) {
        locked = true;
        qemu_mutex_lock_iothread();
    }

    env->mip = (env->mip & ~mask) | (value & mask);

    if (env->mip) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }

    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return old;
}

void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t),
                             uint32_t arg)
{
    env->rdtime_fn = fn;
    env->rdtime_fn_arg = arg;
}

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
    env->load_res = -1;
}

/*
 * get_physical_address_pmp - check PMP permission for this physical address
 *
 * Match the PMP region and check permission for this physical address and its
 * TLB page. Returns TRANSLATE_SUCCESS if the permission check passes.
 *
 * @env: CPURISCVState
 * @prot: The returned protection attributes
 * @tlb_size: The size of the TLB page containing addr; it may be shrunk by
 *            the PMP permission check. Pass NULL if no TLB page is being
 *            set up for addr.
 * @addr: The physical address whose permissions are checked
 * @size: The size of the access
 * @access_type: The type of MMU access
 * @mode: Indicates current privilege level.
 */
static int get_physical_address_pmp(CPURISCVState *env, int *prot,
                                    target_ulong *tlb_size, hwaddr addr,
                                    int size, MMUAccessType access_type,
                                    int mode)
{
    pmp_priv_t pmp_priv;
    target_ulong tlb_size_pmp = 0;

    if (!riscv_feature(env, RISCV_FEATURE_PMP)) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    if (!pmp_hart_has_privs(env, addr, size, 1 << access_type, &pmp_priv,
                            mode)) {
        *prot = 0;
        return TRANSLATE_PMP_FAIL;
    }

    *prot = pmp_priv_to_page_prot(pmp_priv);
    if (tlb_size != NULL) {
        if (pmp_is_range_in_tlb(env, addr & ~(*tlb_size - 1), &tlb_size_pmp)) {
            *tlb_size = tlb_size_pmp;
        }
    }

    return TRANSLATE_SUCCESS;
}

/* get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns TRANSLATE_SUCCESS if the translation was
 * successful.
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 * @env: CPURISCVState
 * @physical: This will be set to the calculated physical address
 * @prot: The returned protection attributes
 * @addr: The virtual address to be translated
 * @fault_pte_addr: If not NULL, this will be set to the faulting PTE address
 *                  when an error occurs during PTE address translation.
 *                  This will already be shifted to match htval.
 * @access_type: The type of MMU access
 * @mmu_idx: Indicates current privilege level
 * @first_stage: Are we in first stage translation?
 *               Second stage is used for hypervisor guest translation
 * @two_stage: Are we going to perform two stage translation
 * @is_debug: Is this access from a debugger or the monitor?
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                target_ulong *fault_pte_addr,
                                int access_type, int mmu_idx,
                                bool first_stage, bool two_stage,
                                bool is_debug)
{
    /* NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct */
    MemTxResult res;
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
    int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
    bool use_background = false;

    /*
     * Check if we should use the background registers for the two
     * stage translation. We don't need to check if we actually need
     * two stage translation as that happened before this function
     * was called. Background registers will be used if the guest has
     * forced a two stage translation to be on (in HS or M mode).
     */
    if (!riscv_cpu_virt_enabled(env) && two_stage) {
        use_background = true;
    }

    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    if (first_stage == false) {
        /* We are in stage 2 translation, this is similar to stage 1. */
        /* Stage 2 is always taken as U-mode */
        mode = PRV_U;
    }

    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    hwaddr base;
    int levels, ptidxbits, ptesize, vm, sum, mxr, widened;

    if (first_stage == true) {
        mxr = get_field(env->mstatus, MSTATUS_MXR);
    } else {
        mxr = get_field(env->vsstatus, MSTATUS_MXR);
    }

    if (first_stage == true) {
        if (use_background) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP64_MODE);
            }
        } else {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP64_MODE);
            }
        }
        widened = 0;
    } else {
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP32_MODE);
        } else {
            base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP64_MODE);
        }
        widened = 2;
    }
    /* status.SUM is ignored when executing with the background registers */
    sum = get_field(env->mstatus, MSTATUS_SUM) || use_background || is_debug;
    switch (vm) {
    case VM_1_10_SV32:
        levels = 2; ptidxbits = 10; ptesize = 4; break;
    case VM_1_10_SV39:
        levels = 3; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV48:
        levels = 4; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV57:
        levels = 5; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_MBARE:
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    default:
        g_assert_not_reached();
    }

    CPUState *cs = env_cpu(env);
    int va_bits = PGSHIFT + levels * ptidxbits + widened;
    target_ulong mask, masked_msbs;

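    /*
     * A valid virtual address must have every bit above va_bits - 1 equal
     * to bit va_bits - 1 (a sign extension), so masked_msbs below must come
     * out as all zeros or all ones.
     */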
    if (TARGET_LONG_BITS > (va_bits - 1)) {
        mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    } else {
        mask = 0;
    }
    masked_msbs = (addr >> (va_bits - 1)) & mask;

    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }

    int ptshift = (levels - 1) * ptidxbits;
    int i;

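    /*
     * If the atomic A/D-bit update below loses a race with another writer,
     * the walk restarts from here. Oversized TCG guests do not run MTTCG,
     * so they need no restart path.
     */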
#if !TCG_OVERSIZED_GUEST
restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx;
        if (i == 0) {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << (ptidxbits + widened)) - 1);
        } else {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << ptidxbits) - 1);
        }

        /* check that physical address of PTE is legal */
        hwaddr pte_addr;

        if (two_stage && first_stage) {
            int vbase_prot;
            hwaddr vbase;

            /* Do the second stage translation on the base PTE address. */
            int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
                                                 base, NULL, MMU_DATA_LOAD,
                                                 mmu_idx, false, true,
                                                 is_debug);

            if (vbase_ret != TRANSLATE_SUCCESS) {
                if (fault_pte_addr) {
                    *fault_pte_addr = (base + idx * ptesize) >> 2;
                }
                return TRANSLATE_G_STAGE_FAIL;
            }

            pte_addr = vbase + idx * ptesize;
        } else {
            pte_addr = base + idx * ptesize;
        }

        int pmp_prot;
        int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr,
                                               sizeof(target_ulong),
                                               MMU_DATA_LOAD, PRV_S);
        if (pmp_ret != TRANSLATE_SUCCESS) {
            return TRANSLATE_PMP_FAIL;
        }

        target_ulong pte;
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
        } else {
            pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
        }

        if (res != MEMTX_OK) {
            return TRANSLATE_FAIL;
        }

        hwaddr ppn = pte >> PTE_PPN_SHIFT;

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /* User PTE flags when not U mode and mstatus.SUM is not set,
               or the access type is an instruction fetch */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be updated
                 *   and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr,
                    &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        qatomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /* misconfigured PTE in ROM (AD bits are not preset) or
                     * PTE is in IO space and can't be updated atomically */
                    return TRANSLATE_FAIL;
                }
            }

            /* for superpage mappings, make a fake leaf PTE for the TLB's
               benefit. */
            target_ulong vpn = addr >> PGSHIFT;
            *physical = ((ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT) |
                        (addr & ~TARGET_PAGE_MASK);

            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /* add write permission on stores or if the page is already dirty,
               so that we TLB miss on later writes to update the dirty bit */
            if ((pte & PTE_W) &&
                    (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}

static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type, bool pmp_violation,
                                bool first_stage, bool two_stage)
{
    CPUState *cs = env_cpu(env);
    int page_fault_exceptions, vm;
    uint64_t satp_mode;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        satp_mode = SATP32_MODE;
    } else {
        satp_mode = SATP64_MODE;
    }

    if (first_stage) {
        vm = get_field(env->satp, satp_mode);
    } else {
        vm = get_field(env->hgatp, satp_mode);
    }

    page_fault_exceptions = vm != VM_1_10_MBARE && !pmp_violation;

    switch (access_type) {
    case MMU_INST_FETCH:
        if (riscv_cpu_virt_enabled(env) && !first_stage) {
            cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_LOAD:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_STORE:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        }
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
    env->two_stage_lookup = two_stage;
}

hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
                             true, riscv_cpu_virt_enabled(env), true)) {
        return -1;
    }

    if (riscv_cpu_virt_enabled(env)) {
        if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
                                 0, mmu_idx, false, true, true)) {
            return -1;
        }
    }

    return phys_addr & TARGET_PAGE_MASK;
}

void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (access_type == MMU_DATA_STORE) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else if (access_type == MMU_DATA_LOAD) {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
    }

    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    riscv_raise_exception(&cpu->env, cs->exception_index, retaddr);
}

void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    riscv_raise_exception(env, cs->exception_index, retaddr);
}
#endif /* !CONFIG_USER_ONLY */

bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
#ifndef CONFIG_USER_ONLY
    vaddr im_address;
    hwaddr pa = 0;
    int prot, prot2, prot_pmp;
    bool pmp_violation = false;
    bool first_stage_error = true;
    bool two_stage_lookup = false;
    int ret = TRANSLATE_FAIL;
    int mode = mmu_idx;
    /* default TLB page size */
    target_ulong tlb_size = TARGET_PAGE_SIZE;

    env->guest_phys_fault_addr = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
               get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
        if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) {
            two_stage_lookup = true;
        }
    }

    if (riscv_cpu_virt_enabled(env) ||
        ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
         access_type != MMU_INST_FETCH)) {
        /* Two stage lookup */
        ret = get_physical_address(env, &pa, &prot, address,
                                   &env->guest_phys_fault_addr, access_type,
                                   mmu_idx, true, true, false);

        /*
         * A G-stage exception may be triggered during two-stage lookup.
         * env->guest_phys_fault_addr has already been set in
         * get_physical_address().
         */
        if (ret == TRANSLATE_G_STAGE_FAIL) {
            first_stage_error = false;
            access_type = MMU_DATA_LOAD;
        }

        qemu_log_mask(CPU_LOG_MMU,
                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            /* Second stage lookup */
            im_address = pa;

            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
                                       access_type, mmu_idx, false, true,
                                       false);

            qemu_log_mask(CPU_LOG_MMU,
                    "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
                    TARGET_FMT_plx " prot %d\n",
                    __func__, im_address, ret, pa, prot2);

            prot &= prot2;

            if (ret == TRANSLATE_SUCCESS) {
                ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                               size, access_type, mode);

                qemu_log_mask(CPU_LOG_MMU,
                              "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                              " %d tlb_size " TARGET_FMT_lu "\n",
                              __func__, pa, ret, prot_pmp, tlb_size);

                prot &= prot_pmp;
            }

            if (ret != TRANSLATE_SUCCESS) {
                /*
                 * Guest physical address translation failed, this is a HS
                 * level exception
                 */
                first_stage_error = false;
                env->guest_phys_fault_addr = (im_address |
                                              (address &
                                               (TARGET_PAGE_SIZE - 1))) >> 2;
            }
        }
    } else {
        /* Single stage lookup */
        ret = get_physical_address(env, &pa, &prot, address, NULL,
                                   access_type, mmu_idx, true, false, false);

        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                           size, access_type, mode);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                          " %d tlb_size " TARGET_FMT_lu "\n",
                          __func__, pa, ret, prot_pmp, tlb_size);

            prot &= prot_pmp;
        }
    }

    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }

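    /*
     * Note: tlb_size may have been shrunk below TARGET_PAGE_SIZE by the PMP
     * check above, so only the matching sub-page range enters the TLB.
     */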
    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
                     prot, mmu_idx, tlb_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type, pmp_violation,
                            first_stage_error,
                            riscv_cpu_virt_enabled(env) ||
                                riscv_cpu_two_stage_lookup(mmu_idx));
        riscv_raise_exception(env, cs->exception_index, retaddr);
    }

    return true;

#else
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
    cpu_loop_exit_restore(cs, retaddr);
#endif
}

/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    bool force_hs_excep = riscv_cpu_force_hs_excep_enabled(env);
    uint64_t s;

    /* cs->exception_index is 32 bits wide, unlike mcause which is XLEN bits
     * wide, so we mask off the MSB and separate into trap type and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    target_ulong deleg = async ? env->mideleg : env->medeleg;
    bool write_tval = false;
    target_ulong tval = 0;
    target_ulong htval = 0;
    target_ulong mtval2 = 0;

    if (cause == RISCV_EXCP_SEMIHOST) {
        if (env->priv >= PRV_S) {
            env->gpr[xA0] = do_common_semihosting(cs);
            env->pc += 4;
            return;
        }
        cause = RISCV_EXCP_BREAKPOINT;
    }

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
            force_hs_excep = true;
            /* fallthrough */
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            write_tval = true;
            tval = env->badaddr;
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);

            if (env->priv == PRV_M) {
                cause = RISCV_EXCP_M_ECALL;
            } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_VS_ECALL;
            } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_S_ECALL;
            } else if (env->priv == PRV_U) {
                cause = RISCV_EXCP_U_ECALL;
            }
        }
    }

    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
                     riscv_cpu_get_trap_name(cause, async));

    qemu_log_mask(CPU_LOG_INT,
                  "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
                  "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
                  __func__, env->mhartid, async, cause, env->pc, tval,
                  riscv_cpu_get_trap_name(cause, async));

    if (env->priv <= PRV_S &&
            cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        if (riscv_has_ext(env, RVH)) {
            target_ulong hdeleg = async ? env->hideleg : env->hedeleg;

            if (env->two_stage_lookup && write_tval) {
                /*
                 * If we are writing a guest virtual address to stval, set
                 * this to 1. If we are trapping to VS we will set this to 0
                 * later.
                 */
                env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 1);
            } else {
                /* For other HS-mode traps, we set this to 0. */
                env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 0);
            }

            if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1) &&
                !force_hs_excep) {
                /* Trap to VS mode */
                /*
                 * Adjust the cause if this is a VS-mode interrupt; HS-mode
                 * interrupts delegated by the hypervisor keep their cause.
                 */
                if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
                    cause == IRQ_VS_EXT) {
                    cause = cause - 1;
                }
                env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 0);
            } else if (riscv_cpu_virt_enabled(env)) {
                /* Trap into HS mode, from virt */
                riscv_cpu_swap_hypervisor_regs(env);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
                                         env->priv);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
                                         riscv_cpu_virt_enabled(env));

                htval = env->guest_phys_fault_addr;

                riscv_cpu_set_virt_enabled(env, 0);
                riscv_cpu_set_force_hs_excep(env, 0);
            } else {
                /* Trap into HS mode */
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
                htval = env->guest_phys_fault_addr;
            }
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
        env->sepc = env->pc;
        env->stval = tval;
        env->htval = htval;
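        /*
         * stvec[1:0] holds the trap vector MODE field: 0 selects direct
         * mode, 1 selects vectored mode, where asynchronous traps jump to
         * base + 4 * cause.
         */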
        env->pc = (env->stvec >> 2 << 2) +
            ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        if (riscv_has_ext(env, RVH)) {
            if (riscv_cpu_virt_enabled(env)) {
                riscv_cpu_swap_hypervisor_regs(env);
            }
            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
                                     riscv_cpu_virt_enabled(env));
            if (riscv_cpu_virt_enabled(env) && tval) {
                env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
            }

            mtval2 = env->guest_phys_fault_addr;

            /* Trapping to M mode, virt is disabled */
            riscv_cpu_set_virt_enabled(env, 0);
            riscv_cpu_set_force_hs_excep(env, 0);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
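        /*
         * For interrupts (async == 1) this sets the MSB of mcause:
         * ~(-1 >> 1) is a mask with only the top bit set, while for
         * exceptions (async == 0) the mask is zero.
         */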
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mtval = tval;
        env->mtval2 = mtval2;
        env->pc = (env->mtvec >> 2 << 2) +
            ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }

    /* NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

    env->two_stage_lookup = false;
#endif
    cs->exception_index = RISCV_EXCP_NONE; /* mark handled to qemu */
}
1182