/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "trace.h"
#include "semihosting/common-semi.h"

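/*
 * The MMU index used for TCG TLB lookups is simply the current privilege
 * level (PRV_U = 0, PRV_S = 1, PRV_M = 3); user-only emulation always
 * translates as U-mode.
 */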
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}

#ifndef CONFIG_USER_ONLY
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    target_ulong irqs;

    target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
    target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
    target_ulong hs_mstatus_sie = get_field(env->mstatus_hs, MSTATUS_SIE);

    target_ulong pending = env->mip & env->mie &
                               ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
    target_ulong vspending = (env->mip & env->mie &
                              (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP));

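    /*
     * Negating the one-bit booleans below (-mie, -sie, -hs_sie) widens
     * them into all-zeroes or all-ones masks, e.g. -(target_ulong)1 == ~0,
     * so "pending & -mie" keeps the pending bits only when the matching
     * xIE condition holds.
     */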
    target_ulong mie    = env->priv < PRV_M ||
                          (env->priv == PRV_M && mstatus_mie);
    target_ulong sie    = env->priv < PRV_S ||
                          (env->priv == PRV_S && mstatus_sie);
    target_ulong hs_sie = env->priv < PRV_S ||
                          (env->priv == PRV_S && hs_mstatus_sie);

    if (riscv_cpu_virt_enabled(env)) {
        target_ulong pending_hs_irq = pending & -hs_sie;

        if (pending_hs_irq) {
            riscv_cpu_set_force_hs_excep(env, FORCE_HS_EXCEP);
            return ctz64(pending_hs_irq);
        }

        pending = vspending;
    }

    irqs = (pending & ~env->mideleg & -mie) | (pending &  env->mideleg & -sie);

    if (irqs) {
        return ctz64(irqs); /* since non-zero */
    } else {
        return EXCP_NONE; /* indicates no pending interrupt */
    }
}
#endif

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
#endif
    return false;
}

#if !defined(CONFIG_USER_ONLY)

/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
            return false;
        }
        return true;
    }

    return false;
}

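/*
 * On every transition between V=1 and V=0 the supervisor CSRs (and the
 * HS-writable mstatus fields) are exchanged with their VS shadow copies,
 * so S-mode software always sees the register set of the world it is
 * running in.
 */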
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
    uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS |
                            MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
                            MSTATUS64_UXL;
    bool current_virt = riscv_cpu_virt_enabled(env);

    g_assert(riscv_has_ext(env, RVH));

    if (current_virt) {
        /* Current V=1 and we are about to change to V=0 */
        env->vsstatus = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->mstatus_hs;

        env->vstvec = env->stvec;
        env->stvec = env->stvec_hs;

        env->vsscratch = env->sscratch;
        env->sscratch = env->sscratch_hs;

        env->vsepc = env->sepc;
        env->sepc = env->sepc_hs;

        env->vscause = env->scause;
        env->scause = env->scause_hs;

        env->vstval = env->sbadaddr;
        env->sbadaddr = env->stval_hs;

        env->vsatp = env->satp;
        env->satp = env->satp_hs;
    } else {
        /* Current V=0 and we are about to change to V=1 */
        env->mstatus_hs = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->vsstatus;

        env->stvec_hs = env->stvec;
        env->stvec = env->vstvec;

        env->sscratch_hs = env->sscratch;
        env->sscratch = env->vsscratch;

        env->sepc_hs = env->sepc;
        env->sepc = env->vsepc;

        env->scause_hs = env->scause;
        env->scause = env->vscause;

        env->stval_hs = env->sbadaddr;
        env->sbadaddr = env->vstval;

        env->satp_hs = env->satp;
        env->satp = env->vsatp;
    }
}

bool riscv_cpu_virt_enabled(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return false;
    }

    return get_field(env->virt, VIRT_ONOFF);
}

void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    /* Flush the TLB on all virt mode changes. */
    if (get_field(env->virt, VIRT_ONOFF) != enable) {
        tlb_flush(env_cpu(env));
    }

    env->virt = set_field(env->virt, VIRT_ONOFF, enable);
}

bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return false;
    }

    return get_field(env->virt, FORCE_HS_EXCEP);
}

void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    env->virt = set_field(env->virt, FORCE_HS_EXCEP, enable);
}

bool riscv_cpu_two_stage_lookup(int mmu_idx)
{
    return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK;
}

int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}

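/*
 * Atomically update the mip bits selected by @mask to @value, assert or
 * deassert the hard interrupt line accordingly, and return the old mip.
 * A minimal usage sketch, as a timer device model might raise and then
 * clear the machine timer interrupt (BOOL_TO_MASK is the helper from
 * cpu.h that widens a bool into an all-ones/all-zeroes mask):
 *
 *     riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(1));
 *     ...
 *     riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(0));
 */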
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
{
    CPURISCVState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t old = env->mip;
    bool locked = false;

    if (!qemu_mutex_iothread_locked()) {
        locked = true;
        qemu_mutex_lock_iothread();
    }

    env->mip = (env->mip & ~mask) | (value & mask);

    if (env->mip) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }

    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return old;
}

void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t),
                             uint32_t arg)
{
    env->rdtime_fn = fn;
    env->rdtime_fn_arg = arg;
}

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
    env->load_res = -1;
}

/*
 * get_physical_address_pmp - check PMP permission for this physical address
 *
 * Match the PMP region and check permission for this physical address and its
 * TLB page. Returns TRANSLATE_SUCCESS (0) if the permission check passes.
 *
 * @env: CPURISCVState
 * @prot: The returned protection attributes
 * @tlb_size: The TLB page size containing addr. It may be narrowed after the
 *            PMP permission check. Pass NULL if no TLB page is being set up
 *            for addr.
 * @addr: The physical address whose permissions are to be checked
 * @size: The size of the access
 * @access_type: The type of MMU access
 * @mode: Indicates current privilege level.
 */
static int get_physical_address_pmp(CPURISCVState *env, int *prot,
                                    target_ulong *tlb_size, hwaddr addr,
                                    int size, MMUAccessType access_type,
                                    int mode)
{
    pmp_priv_t pmp_priv;
    target_ulong tlb_size_pmp = 0;

    if (!riscv_feature(env, RISCV_FEATURE_PMP)) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    if (!pmp_hart_has_privs(env, addr, size, 1 << access_type, &pmp_priv,
                            mode)) {
        *prot = 0;
        return TRANSLATE_PMP_FAIL;
    }

    *prot = pmp_priv_to_page_prot(pmp_priv);
    if (tlb_size != NULL) {
        if (pmp_is_range_in_tlb(env, addr & ~(*tlb_size - 1), &tlb_size_pmp)) {
            *tlb_size = tlb_size_pmp;
        }
    }

    return TRANSLATE_SUCCESS;
}

/* get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 * @env: CPURISCVState
 * @physical: This will be set to the calculated physical address
 * @prot: The returned protection attributes
 * @addr: The virtual address to be translated
 * @fault_pte_addr: If not NULL, this will be set to the faulting pte address
 *                  when an error occurs during pte address translation.
 *                  This will already be shifted to match htval.
 * @access_type: The type of MMU access
 * @mmu_idx: Indicates current privilege level
 * @first_stage: Are we in first stage translation?
 *               Second stage is used for hypervisor guest translation
 * @two_stage: Are we going to perform two stage translation
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                target_ulong *fault_pte_addr,
                                int access_type, int mmu_idx,
                                bool first_stage, bool two_stage)
{
    /* NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct */
    MemTxResult res;
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
    int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
    bool use_background = false;

    /*
     * Check if we should use the background registers for the two
     * stage translation. We don't need to check if we actually need
     * two stage translation as that happened before this function
     * was called. Background registers will be used if the guest has
     * forced a two stage translation to be on (in HS or M mode).
     */
    if (!riscv_cpu_virt_enabled(env) && two_stage) {
        use_background = true;
    }

    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    if (first_stage == false) {
        /* We are in stage 2 translation, this is similar to stage 1. */
        /* Stage 2 is always taken as U-mode */
        mode = PRV_U;
    }

    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    hwaddr base;
    int levels, ptidxbits, ptesize, vm, sum, mxr, widened;

    if (first_stage == true) {
        mxr = get_field(env->mstatus, MSTATUS_MXR);
    } else {
        mxr = get_field(env->vsstatus, MSTATUS_MXR);
    }

    if (first_stage == true) {
        if (use_background) {
            base = (hwaddr)get_field(env->vsatp, SATP_PPN) << PGSHIFT;
            vm = get_field(env->vsatp, SATP_MODE);
        } else {
            base = (hwaddr)get_field(env->satp, SATP_PPN) << PGSHIFT;
            vm = get_field(env->satp, SATP_MODE);
        }
        widened = 0;
    } else {
        base = (hwaddr)get_field(env->hgatp, HGATP_PPN) << PGSHIFT;
        vm = get_field(env->hgatp, HGATP_MODE);
        widened = 2;
    }
    /* mstatus.SUM is treated as set (i.e. ignored) when walking with the
       background registers */
    sum = get_field(env->mstatus, MSTATUS_SUM) || use_background;
    switch (vm) {
    case VM_1_10_SV32:
        levels = 2; ptidxbits = 10; ptesize = 4; break;
    case VM_1_10_SV39:
        levels = 3; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV48:
        levels = 4; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV57:
        levels = 5; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_MBARE:
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    default:
        g_assert_not_reached();
    }

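    /*
     * Worked example: for Sv39 (levels = 3, ptidxbits = 9, widened = 0),
     * va_bits = 12 + 3 * 9 = 39, so bits 63..38 of the virtual address
     * must all be copies of bit 38 (sign extension); the mask/masked_msbs
     * check below rejects anything else.
     */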
    CPUState *cs = env_cpu(env);
    int va_bits = PGSHIFT + levels * ptidxbits + widened;
    target_ulong mask, masked_msbs;

    if (TARGET_LONG_BITS > (va_bits - 1)) {
        mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    } else {
        mask = 0;
    }
    masked_msbs = (addr >> (va_bits - 1)) & mask;

    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }

    int ptshift = (levels - 1) * ptidxbits;
    int i;

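    /*
     * The walk restarts from here if the compare-and-swap that sets the
     * A/D bits further down observes that the PTE changed under our feet.
     */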
#if !TCG_OVERSIZED_GUEST
restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx;
        if (i == 0) {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << (ptidxbits + widened)) - 1);
        } else {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << ptidxbits) - 1);
        }

        /* check that physical address of PTE is legal */
        hwaddr pte_addr;

        if (two_stage && first_stage) {
            int vbase_prot;
            hwaddr vbase;

            /* Do the second stage translation on the base PTE address. */
            int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
                                                 base, NULL, MMU_DATA_LOAD,
                                                 mmu_idx, false, true);

            if (vbase_ret != TRANSLATE_SUCCESS) {
                if (fault_pte_addr) {
                    *fault_pte_addr = (base + idx * ptesize) >> 2;
                }
                return TRANSLATE_G_STAGE_FAIL;
            }

            pte_addr = vbase + idx * ptesize;
        } else {
            pte_addr = base + idx * ptesize;
        }

        int pmp_prot;
        int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr,
                                               sizeof(target_ulong),
                                               MMU_DATA_LOAD, PRV_S);
        if (pmp_ret != TRANSLATE_SUCCESS) {
            return TRANSLATE_PMP_FAIL;
        }

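        /*
         * Load the PTE itself: 4 bytes on RV32 (Sv32), 8 bytes on RV64,
         * matching the ptesize chosen above.
         */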
        target_ulong pte;
        if (riscv_cpu_is_32bit(env)) {
            pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
        } else {
            pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
        }

        if (res != MEMTX_OK) {
            return TRANSLATE_FAIL;
        }

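        /*
         * The low 10 bits of a PTE hold the flags (V/R/W/X/U/G/A/D plus
         * the two RSW bits); the physical page number starts at bit
         * PTE_PPN_SHIFT.
         */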
        hwaddr ppn = pte >> PTE_PPN_SHIFT;

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /* User PTE flags when not U mode and mstatus.SUM is not set,
               or the access type is an instruction fetch */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be updated
                 *   and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr,
                    &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        qatomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /*
                     * Misconfigured PTE in ROM (the A/D bits are not
                     * pre-set) or PTE in I/O space that cannot be updated
                     * atomically.
                     */
                    return TRANSLATE_FAIL;
                }
            }

            /* for superpage mappings, make a fake leaf PTE for the TLB's
               benefit. */
            target_ulong vpn = addr >> PGSHIFT;
            *physical = ((ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT) |
                        (addr & ~TARGET_PAGE_MASK);

            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /* add write permission on stores or if the page is already dirty,
               so that we TLB miss on later writes to update the dirty bit */
            if ((pte & PTE_W) &&
                    (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}

static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type, bool pmp_violation,
                                bool first_stage, bool two_stage)
{
    CPUState *cs = env_cpu(env);
    int page_fault_exceptions;
    if (first_stage) {
        page_fault_exceptions =
            get_field(env->satp, SATP_MODE) != VM_1_10_MBARE &&
            !pmp_violation;
    } else {
        page_fault_exceptions =
            get_field(env->hgatp, HGATP_MODE) != VM_1_10_MBARE &&
            !pmp_violation;
    }
    switch (access_type) {
    case MMU_INST_FETCH:
        if (riscv_cpu_virt_enabled(env) && !first_stage) {
            cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_LOAD:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_STORE:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        }
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
}

hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
                             true, riscv_cpu_virt_enabled(env))) {
        return -1;
    }

    if (riscv_cpu_virt_enabled(env)) {
        if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
                                 0, mmu_idx, false, true)) {
            return -1;
        }
    }

    return phys_addr & TARGET_PAGE_MASK;
}

void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (access_type == MMU_DATA_STORE) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    }

    env->badaddr = addr;
    riscv_raise_exception(&cpu->env, cs->exception_index, retaddr);
}

void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    riscv_raise_exception(env, cs->exception_index, retaddr);
}
#endif /* !CONFIG_USER_ONLY */

bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
#ifndef CONFIG_USER_ONLY
    vaddr im_address;
    hwaddr pa = 0;
    int prot, prot2, prot_pmp;
    bool pmp_violation = false;
    bool first_stage_error = true;
    bool two_stage_lookup = false;
    int ret = TRANSLATE_FAIL;
    int mode = mmu_idx;
    /* default TLB page size */
    target_ulong tlb_size = TARGET_PAGE_SIZE;

    env->guest_phys_fault_addr = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
               get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
        if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) {
            two_stage_lookup = true;
        }
    }

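    /*
     * Two-stage translation is used when V=1, and also for HLV/HLVX/HSV
     * accesses or M-mode MPRV accesses with MSTATUS_MPV set (detected just
     * above); instruction fetches never take the MPRV path.
     */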
759 
760     if (riscv_cpu_virt_enabled(env) ||
761         ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
762          access_type != MMU_INST_FETCH)) {
763         /* Two stage lookup */
764         ret = get_physical_address(env, &pa, &prot, address,
765                                    &env->guest_phys_fault_addr, access_type,
766                                    mmu_idx, true, true);
767 
        /*
         * A G-stage exception may be triggered during the two-stage lookup,
         * in which case env->guest_phys_fault_addr has already been set in
         * get_physical_address().
         */
        if (ret == TRANSLATE_G_STAGE_FAIL) {
            first_stage_error = false;
            access_type = MMU_DATA_LOAD;
        }

        qemu_log_mask(CPU_LOG_MMU,
                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            /* Second stage lookup */
            im_address = pa;

            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
                                       access_type, mmu_idx, false, true);

            qemu_log_mask(CPU_LOG_MMU,
                    "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
                    TARGET_FMT_plx " prot %d\n",
                    __func__, im_address, ret, pa, prot2);

            prot &= prot2;

            if (ret == TRANSLATE_SUCCESS) {
                ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                               size, access_type, mode);

                qemu_log_mask(CPU_LOG_MMU,
                              "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                              " %d tlb_size " TARGET_FMT_lu "\n",
                              __func__, pa, ret, prot_pmp, tlb_size);

                prot &= prot_pmp;
            }

            if (ret != TRANSLATE_SUCCESS) {
                /*
                 * Guest physical address translation failed, this is a HS
                 * level exception
                 */
                first_stage_error = false;
                env->guest_phys_fault_addr = (im_address |
                                              (address &
                                               (TARGET_PAGE_SIZE - 1))) >> 2;
            }
        }
    } else {
        /* Single stage lookup */
        ret = get_physical_address(env, &pa, &prot, address, NULL,
                                   access_type, mmu_idx, true, false);

        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                           size, access_type, mode);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                          " %d tlb_size " TARGET_FMT_lu "\n",
                          __func__, pa, ret, prot_pmp, tlb_size);

            prot &= prot_pmp;
        }
    }

    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }

    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
                     prot, mmu_idx, tlb_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type, pmp_violation,
                            first_stage_error,
                            riscv_cpu_virt_enabled(env) ||
                                riscv_cpu_two_stage_lookup(mmu_idx));
        riscv_raise_exception(env, cs->exception_index, retaddr);
    }

    return true;

#else
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
    cpu_loop_exit_restore(cs, retaddr);
#endif
}

/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    bool force_hs_excep = riscv_cpu_force_hs_excep_enabled(env);
    uint64_t s;

    /* cs->exception_index is 32 bits wide, unlike mcause which is XLEN bits
     * wide, so we mask off the MSB and separate into trap type and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    target_ulong deleg = async ? env->mideleg : env->medeleg;
    bool write_tval = false;
    target_ulong tval = 0;
    target_ulong htval = 0;
    target_ulong mtval2 = 0;

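    /*
     * Semihosting calls are handled inline when permitted: the return
     * value goes to a0 and the pc is advanced past the 4-byte ebreak.
     * In U-mode the ebreak is reported as an ordinary breakpoint instead.
     */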
    if (cause == RISCV_EXCP_SEMIHOST) {
        if (env->priv >= PRV_S) {
            env->gpr[xA0] = do_common_semihosting(cs);
            env->pc += 4;
            return;
        }
        cause = RISCV_EXCP_BREAKPOINT;
    }

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
            force_hs_excep = true;
            /* fallthrough */
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            write_tval = true;
            tval = env->badaddr;
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);

            if (env->priv == PRV_M) {
                cause = RISCV_EXCP_M_ECALL;
            } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_VS_ECALL;
            } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_S_ECALL;
            } else if (env->priv == PRV_U) {
                cause = RISCV_EXCP_U_ECALL;
            }
        }
    }

955 
956     trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
957                      riscv_cpu_get_trap_name(cause, async));
958 
959     qemu_log_mask(CPU_LOG_INT,
960                   "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
961                   "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
962                   __func__, env->mhartid, async, cause, env->pc, tval,
963                   riscv_cpu_get_trap_name(cause, async));
964 
965     if (env->priv <= PRV_S &&
966             cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
967         /* handle the trap in S-mode */
968         if (riscv_has_ext(env, RVH)) {
969             target_ulong hdeleg = async ? env->hideleg : env->hedeleg;
970             bool two_stage_lookup = false;
971 
972             if (env->priv == PRV_M ||
973                 (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
974                 (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
975                     get_field(env->hstatus, HSTATUS_HU))) {
976                     two_stage_lookup = true;
977             }
978 
979             if ((riscv_cpu_virt_enabled(env) || two_stage_lookup) && write_tval) {
980                 /*
981                  * If we are writing a guest virtual address to stval, set
982                  * this to 1. If we are trapping to VS we will set this to 0
983                  * later.
984                  */
985                 env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 1);
986             } else {
987                 /* For other HS-mode traps, we set this to 0. */
988                 env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 0);
989             }
990 
991             if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1) &&
992                 !force_hs_execp) {
993                 /* Trap to VS mode */
                /*
                 * See if we need to adjust the cause: yes if it is a VS-mode
                 * interrupt, no if the hypervisor has delegated one of the
                 * HS-mode interrupts.
                 */
                if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
                    cause == IRQ_VS_EXT) {
                    cause = cause - 1;
                }
                env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 0);
            } else if (riscv_cpu_virt_enabled(env)) {
                /* Trap into HS mode, from virt */
                riscv_cpu_swap_hypervisor_regs(env);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
                                         env->priv);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
                                         riscv_cpu_virt_enabled(env));

                htval = env->guest_phys_fault_addr;

                riscv_cpu_set_virt_enabled(env, 0);
                riscv_cpu_set_force_hs_excep(env, 0);
            } else {
                /* Trap into HS mode */
                if (!two_stage_lookup) {
                    env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
                                             riscv_cpu_virt_enabled(env));
                }
                htval = env->guest_phys_fault_addr;
            }
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
        env->sepc = env->pc;
        env->sbadaddr = tval;
        env->htval = htval;
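        /*
         * Bits 1:0 of stvec (and mtvec below) select the vector mode:
         * direct (0) jumps to the base address for every trap, vectored
         * (1) additionally offsets asynchronous interrupts by cause * 4.
         */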
        env->pc = (env->stvec >> 2 << 2) +
            ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        if (riscv_has_ext(env, RVH)) {
            if (riscv_cpu_virt_enabled(env)) {
                riscv_cpu_swap_hypervisor_regs(env);
            }
            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
                                     riscv_cpu_virt_enabled(env));
            if (riscv_cpu_virt_enabled(env) && tval) {
                env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
            }

            mtval2 = env->guest_phys_fault_addr;

            /* Trapping to M mode, virt is disabled */
            riscv_cpu_set_virt_enabled(env, 0);
            riscv_cpu_set_force_hs_excep(env, 0);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
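        /*
         * ~(((target_ulong)-1) >> async) is 0 for synchronous traps
         * (async == 0) and has only the MSB set for interrupts
         * (async == 1), i.e. it is the mcause interrupt flag.
         */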
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mbadaddr = tval;
        env->mtval2 = mtval2;
        env->pc = (env->mtvec >> 2 << 2) +
            ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }

    /* NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

#endif
    cs->exception_index = EXCP_NONE; /* mark handled to qemu */
}
1079