xref: /openbmc/qemu/target/riscv/op_helper.c (revision e8779f3d1509cd07620c6166a9a280376e01ff2f)
1 /*
2  * RISC-V Emulation Helpers for QEMU.
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  * Copyright (c) 2022      VRULL GmbH
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2 or later, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License along with
18  * this program.  If not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "internals.h"
24 #include "exec/cputlb.h"
25 #include "accel/tcg/cpu-ldst.h"
26 #include "accel/tcg/probe.h"
27 #include "exec/helper-proto.h"
28 #include "exec/tlb-flags.h"
29 #include "trace.h"
30 
31 #ifndef CONFIG_USER_ONLY
static inline MemOp mo_endian_env(CPURISCVState *env)
{
    /*
     * Return the MemOp endianness flag for guest data accesses.
     *
     * A couple of bits in MSTATUS set the endianness:
     *  - MSTATUS_UBE (User-mode),
     *  - MSTATUS_SBE (Supervisor-mode),
     *  - MSTATUS_MBE (Machine-mode)
     * but we don't implement that yet, so the target-default
     * endianness (MO_TE) is always returned.
     */
    return MO_TE;
}
43 #endif
44 
45 /* Exceptions processing helpers */
46 G_NORETURN void riscv_raise_exception(CPURISCVState *env,
47                                       RISCVException exception,
48                                       uintptr_t pc)
49 {
50     CPUState *cs = env_cpu(env);
51 
52     trace_riscv_exception(exception,
53                           riscv_cpu_get_trap_name(exception, false),
54                           env->pc);
55 
56     cs->exception_index = exception;
57     cpu_loop_exit_restore(cs, pc);
58 }
59 
/* TCG helper entry point: raise @exception with no host pc to unwind. */
void helper_raise_exception(CPURISCVState *env, uint32_t exception)
{
    riscv_raise_exception(env, exception, 0);
}
64 
65 target_ulong helper_csrr(CPURISCVState *env, int csr)
66 {
67     /*
68      * The seed CSR must be accessed with a read-write instruction. A
69      * read-only instruction such as CSRRS/CSRRC with rs1=x0 or CSRRSI/
70      * CSRRCI with uimm=0 will raise an illegal instruction exception.
71      */
72     if (csr == CSR_SEED) {
73         riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
74     }
75 
76     target_ulong val = 0;
77     RISCVException ret = riscv_csrr(env, csr, &val);
78 
79     if (ret != RISCV_EXCP_NONE) {
80         riscv_raise_exception(env, ret, GETPC());
81     }
82     return val;
83 }
84 
85 void helper_csrw(CPURISCVState *env, int csr, target_ulong src)
86 {
87     target_ulong mask = env->xl == MXL_RV32 ? UINT32_MAX : (target_ulong)-1;
88     RISCVException ret = riscv_csrrw(env, csr, NULL, src, mask, GETPC());
89 
90     if (ret != RISCV_EXCP_NONE) {
91         riscv_raise_exception(env, ret, GETPC());
92     }
93 }
94 
95 target_ulong helper_csrrw(CPURISCVState *env, int csr,
96                           target_ulong src, target_ulong write_mask)
97 {
98     target_ulong val = 0;
99     RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask, GETPC());
100 
101     if (ret != RISCV_EXCP_NONE) {
102         riscv_raise_exception(env, ret, GETPC());
103     }
104     return val;
105 }
106 
107 target_ulong helper_csrr_i128(CPURISCVState *env, int csr)
108 {
109     Int128 rv = int128_zero();
110     RISCVException ret = riscv_csrr_i128(env, csr, &rv);
111 
112     if (ret != RISCV_EXCP_NONE) {
113         riscv_raise_exception(env, ret, GETPC());
114     }
115 
116     env->retxh = int128_gethi(rv);
117     return int128_getlo(rv);
118 }
119 
120 void helper_csrw_i128(CPURISCVState *env, int csr,
121                       target_ulong srcl, target_ulong srch)
122 {
123     RISCVException ret = riscv_csrrw_i128(env, csr, NULL,
124                                           int128_make128(srcl, srch),
125                                           UINT128_MAX, GETPC());
126 
127     if (ret != RISCV_EXCP_NONE) {
128         riscv_raise_exception(env, ret, GETPC());
129     }
130 }
131 
132 target_ulong helper_csrrw_i128(CPURISCVState *env, int csr,
133                                target_ulong srcl, target_ulong srch,
134                                target_ulong maskl, target_ulong maskh)
135 {
136     Int128 rv = int128_zero();
137     RISCVException ret = riscv_csrrw_i128(env, csr, &rv,
138                                           int128_make128(srcl, srch),
139                                           int128_make128(maskl, maskh),
140                                           GETPC());
141 
142     if (ret != RISCV_EXCP_NONE) {
143         riscv_raise_exception(env, ret, GETPC());
144     }
145 
146     env->retxh = int128_gethi(rv);
147     return int128_getlo(rv);
148 }
149 
150 
151 /*
152  * check_zicbo_envcfg
153  *
154  * Raise virtual exceptions and illegal instruction exceptions for
155  * Zicbo[mz] instructions based on the settings of [mhs]envcfg as
156  * specified in section 2.5.1 of the CMO specification.
157  */
static void check_zicbo_envcfg(CPURISCVState *env, target_ulong envbits,
                                uintptr_t ra)
{
#ifndef CONFIG_USER_ONLY
    /* Below M-mode the operation must first be enabled in menvcfg. */
    if ((env->priv < PRV_M) && !get_field(env->menvcfg, envbits)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra);
    }

    /*
     * When virtualized, a disabled henvcfg bit (VS/VU) or senvcfg bit
     * (VU only) yields a virtual instruction fault instead.
     */
    if (env->virt_enabled &&
        (((env->priv <= PRV_S) && !get_field(env->henvcfg, envbits)) ||
         ((env->priv < PRV_S) && !get_field(env->senvcfg, envbits)))) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, ra);
    }

    /* Non-virtualized U-mode additionally requires the senvcfg bit. */
    if ((env->priv < PRV_S) && !get_field(env->senvcfg, envbits)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra);
    }
#endif
}
177 
/*
 * cbo.zero: write zeros to the whole cboz_blocksize-aligned cache
 * block containing @address, after the Zicboz envcfg checks.
 */
void helper_cbo_zero(CPURISCVState *env, target_ulong address)
{
    RISCVCPU *cpu = env_archcpu(env);
    uint16_t cbozlen = cpu->cfg.cboz_blocksize;
    int mmu_idx = riscv_env_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *mem;

    check_zicbo_envcfg(env, MENVCFG_CBZE, ra);

    /* Mask off low-bits to align-down to the cache-block. */
    address &= ~(cbozlen - 1);

    /*
     * cbo.zero requires MMU_DATA_STORE access. Do a probe_write()
     * to raise any exceptions, including PMP.
     */
    mem = probe_write(env, address, cbozlen, mmu_idx, ra);

    if (likely(mem)) {
        /* RAM page: zero the whole block directly in host memory. */
        memset(mem, 0, cbozlen);
    } else {
        /*
         * This means that we're dealing with an I/O page. Section 4.2
         * of cmobase v1.0.1 says:
         *
         * "Cache-block zero instructions store zeros independently
         * of whether data from the underlying memory locations are
         * cacheable."
         *
         * Write zeros in address + cbozlen regardless of not being
         * a RAM page.
         */
        for (int i = 0; i < cbozlen; i++) {
            cpu_stb_mmuidx_ra(env, address + i, 0, mmu_idx, ra);
        }
    }
}
216 
217 /*
218  * check_zicbom_access
219  *
220  * Check access permissions (LOAD, STORE or FETCH as specified in
221  * section 2.5.2 of the CMO specification) for Zicbom, raising
222  * either store page-fault (non-virtualized) or store guest-page
223  * fault (virtualized).
224  */
static void check_zicbom_access(CPURISCVState *env,
                                target_ulong address,
                                uintptr_t ra)
{
    RISCVCPU *cpu = env_archcpu(env);
    int mmu_idx = riscv_env_mmu_index(env, false);
    uint16_t cbomlen = cpu->cfg.cbom_blocksize;
    void *phost;
    int ret;

    /* Mask off low-bits to align-down to the cache-block. */
    address &= ~(cbomlen - 1);

    /*
     * Section 2.5.2 of cmobase v1.0.1:
     *
     * "A cache-block management instruction is permitted to
     * access the specified cache block whenever a load instruction
     * or store instruction is permitted to access the corresponding
     * physical addresses. If neither a load instruction nor store
     * instruction is permitted to access the physical addresses,
     * but an instruction fetch is permitted to access the physical
     * addresses, whether a cache-block management instruction is
     * permitted to access the cache block is UNSPECIFIED."
     */
    /* First try a non-faulting read probe so we can fall back below. */
    ret = probe_access_flags(env, address, cbomlen, MMU_DATA_LOAD,
                             mmu_idx, true, &phost, ra);
    if (ret != TLB_INVALID_MASK) {
        /* Success: readable */
        return;
    }

    /*
     * Since not readable, must be writable. On failure, store
     * fault/store guest amo fault will be raised by
     * riscv_cpu_tlb_fill(). PMP exceptions will be caught
     * there as well.
     */
    probe_write(env, address, cbomlen, mmu_idx, ra);
}
265 
266 void helper_cbo_clean_flush(CPURISCVState *env, target_ulong address)
267 {
268     uintptr_t ra = GETPC();
269     check_zicbo_envcfg(env, MENVCFG_CBCFE, ra);
270     check_zicbom_access(env, address, ra);
271 
272     /* We don't emulate the cache-hierarchy, so we're done. */
273 }
274 
275 void helper_cbo_inval(CPURISCVState *env, target_ulong address)
276 {
277     uintptr_t ra = GETPC();
278     check_zicbo_envcfg(env, MENVCFG_CBIE, ra);
279     check_zicbom_access(env, address, ra);
280 
281     /* We don't emulate the cache-hierarchy, so we're done. */
282 }
283 
284 #ifndef CONFIG_USER_ONLY
285 
/*
 * SRET: return from a supervisor-mode trap.  Validates the trap-return
 * conditions (privilege, TSR/VTSR, target pc alignment), restores the
 * interrupt/privilege state stashed in mstatus/hstatus, and returns the
 * pc to resume at.
 */
target_ulong helper_sret(CPURISCVState *env)
{
    uint64_t mstatus;
    target_ulong prev_priv, prev_virt = env->virt_enabled;
    /* Capture the originating mode for the CTR entry recorded below. */
    const target_ulong src_priv = env->priv;
    const bool src_virt = env->virt_enabled;

    if (!(env->priv >= PRV_S)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    /* Without 16-bit insns, the return target must be 4-byte aligned. */
    target_ulong retpc = env->sepc & get_xepc_mask(env);
    if (!riscv_cpu_allow_16bit_insn(&env_archcpu(env)->cfg,
                                    env->priv_ver,
                                    env->misa_ext) && (retpc & 0x3)) {
        riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
    }

    /* mstatus.TSR traps SRET executed below M-mode. */
    if (get_field(env->mstatus, MSTATUS_TSR) && !(env->priv >= PRV_M)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    /* hstatus.VTSR traps SRET executed while virtualized. */
    if (env->virt_enabled && get_field(env->hstatus, HSTATUS_VTSR)) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    }

    /* Pop the S-level trap stack: SIE <- SPIE, SPIE <- 1, SPP <- U. */
    mstatus = env->mstatus;
    prev_priv = get_field(mstatus, MSTATUS_SPP);
    mstatus = set_field(mstatus, MSTATUS_SIE,
                        get_field(mstatus, MSTATUS_SPIE));
    mstatus = set_field(mstatus, MSTATUS_SPIE, 1);
    mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U);

    if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
        if (riscv_has_ext(env, RVH)) {
            target_ulong prev_vu = get_field(env->hstatus, HSTATUS_SPV) &&
                                   prev_priv == PRV_U;
            /* Returning to VU from HS, vsstatus.sdt = 0 */
            if (!env->virt_enabled && prev_vu) {
                env->vsstatus = set_field(env->vsstatus, MSTATUS_SDT, 0);
            }
        }
        mstatus = set_field(mstatus, MSTATUS_SDT, 0);
    }
    if (riscv_cpu_cfg(env)->ext_smdbltrp && env->priv >= PRV_M) {
        mstatus = set_field(mstatus, MSTATUS_MDT, 0);
    }
    if (env->priv_ver >= PRIV_VERSION_1_12_0) {
        mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
    }
    env->mstatus = mstatus;

    if (riscv_has_ext(env, RVH) && !env->virt_enabled) {
        /* We support Hypervisor extensions and virtualisation is disabled */
        target_ulong hstatus = env->hstatus;

        /* hstatus.SPV says whether the trap came from a virtual mode. */
        prev_virt = get_field(hstatus, HSTATUS_SPV);
        hstatus = set_field(hstatus, HSTATUS_SPV, 0);

        env->hstatus = hstatus;

        if (prev_virt) {
            riscv_cpu_swap_hypervisor_regs(env);
        }
    }

    riscv_cpu_set_mode(env, prev_priv, prev_virt);

    /*
     * If forward cfi enabled for new priv, restore elp status
     * and clear spelp in mstatus
     */
    if (cpu_get_fcfien(env)) {
        env->elp = get_field(env->mstatus, MSTATUS_SPELP);
    }
    env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, 0);

    if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
        riscv_ctr_add_entry(env, env->pc, retpc, CTRDATA_TYPE_EXCEP_INT_RET,
                            src_priv, src_virt);
    }

    return retpc;
}
370 
371 static void check_ret_from_m_mode(CPURISCVState *env, target_ulong retpc,
372                                   target_ulong prev_priv,
373                                   uintptr_t ra)
374 {
375     if (!(env->priv >= PRV_M)) {
376         riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra);
377     }
378 
379     if (!riscv_cpu_allow_16bit_insn(&env_archcpu(env)->cfg,
380                                     env->priv_ver,
381                                     env->misa_ext) && (retpc & 0x3)) {
382         riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, ra);
383     }
384 
385     if (riscv_cpu_cfg(env)->pmp &&
386         !pmp_get_num_rules(env) && (prev_priv != PRV_M)) {
387         riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, ra);
388     }
389 }
390 static target_ulong ssdbltrp_mxret(CPURISCVState *env, target_ulong mstatus,
391                                    target_ulong prev_priv,
392                                    target_ulong prev_virt)
393 {
394     /* If returning to U, VS or VU, sstatus.sdt = 0 */
395     if (prev_priv == PRV_U || (prev_virt &&
396         (prev_priv == PRV_S || prev_priv == PRV_U))) {
397         mstatus = set_field(mstatus, MSTATUS_SDT, 0);
398         /* If returning to VU, vsstatus.sdt = 0 */
399         if (prev_virt && prev_priv == PRV_U) {
400             env->vsstatus = set_field(env->vsstatus, MSTATUS_SDT, 0);
401         }
402     }
403 
404     return mstatus;
405 }
406 
/*
 * MRET: return from a machine-mode trap.  Pops the M-level trap stack
 * in mstatus, restores the previous privilege/virtualization mode and
 * returns the pc to resume at.
 */
target_ulong helper_mret(CPURISCVState *env)
{
    target_ulong retpc = env->mepc & get_xepc_mask(env);
    uint64_t mstatus = env->mstatus;
    target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
    uintptr_t ra = GETPC();

    check_ret_from_m_mode(env, retpc, prev_priv, ra);

    /* MPV only selects a virtual mode when not returning to M. */
    target_ulong prev_virt = get_field(env->mstatus, MSTATUS_MPV) &&
                             (prev_priv != PRV_M);
    /* Pop the trap stack: MIE <- MPIE, MPIE <- 1, MPP <- least priv. */
    mstatus = set_field(mstatus, MSTATUS_MIE,
                        get_field(mstatus, MSTATUS_MPIE));
    mstatus = set_field(mstatus, MSTATUS_MPIE, 1);
    mstatus = set_field(mstatus, MSTATUS_MPP,
                        riscv_has_ext(env, RVU) ? PRV_U : PRV_M);
    mstatus = set_field(mstatus, MSTATUS_MPV, 0);
    if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
        mstatus = ssdbltrp_mxret(env, mstatus, prev_priv, prev_virt);
    }
    if (riscv_cpu_cfg(env)->ext_smdbltrp) {
        mstatus = set_field(mstatus, MSTATUS_MDT, 0);
    }
    /* Since priv 1.12, leaving M-mode also clears MPRV. */
    if ((env->priv_ver >= PRIV_VERSION_1_12_0) && (prev_priv != PRV_M)) {
        mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
    }
    env->mstatus = mstatus;

    if (riscv_has_ext(env, RVH) && prev_virt) {
        riscv_cpu_swap_hypervisor_regs(env);
    }

    riscv_cpu_set_mode(env, prev_priv, prev_virt);
    /*
     * If forward cfi enabled for new priv, restore elp status
     * and clear mpelp in mstatus
     */
    if (cpu_get_fcfien(env)) {
        env->elp = get_field(env->mstatus, MSTATUS_MPELP);
    }
    env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, 0);

    if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
        riscv_ctr_add_entry(env, env->pc, retpc, CTRDATA_TYPE_EXCEP_INT_RET,
                            PRV_M, false);
    }

    return retpc;
}
456 
/*
 * MNRET (Smrnmi): return from a resumable non-maskable interrupt
 * handler.  Re-enables NMIs via mnstatus.NMIE and restores the mode
 * saved in mnstatus, returning the pc to resume at.
 */
target_ulong helper_mnret(CPURISCVState *env)
{
    target_ulong retpc = env->mnepc;
    target_ulong prev_priv = get_field(env->mnstatus, MNSTATUS_MNPP);
    target_ulong prev_virt;
    uintptr_t ra = GETPC();

    check_ret_from_m_mode(env, retpc, prev_priv, ra);

    /* MNPV only selects a virtual mode when not returning to M. */
    prev_virt = get_field(env->mnstatus, MNSTATUS_MNPV) &&
                (prev_priv != PRV_M);
    env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, true);

    /*
     * If MNRET changes the privilege mode to a mode
     * less privileged than M, it also sets mstatus.MPRV to 0.
     */
    if (prev_priv < PRV_M) {
        env->mstatus = set_field(env->mstatus, MSTATUS_MPRV, false);
    }
    if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
        env->mstatus = ssdbltrp_mxret(env, env->mstatus, prev_priv, prev_virt);
    }

    /* Smdbltrp: leaving M-mode clears the M double-trap bit. */
    if (riscv_cpu_cfg(env)->ext_smdbltrp) {
        if (prev_priv < PRV_M) {
            env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 0);
        }
    }

    if (riscv_has_ext(env, RVH) && prev_virt) {
        riscv_cpu_swap_hypervisor_regs(env);
    }

    riscv_cpu_set_mode(env, prev_priv, prev_virt);

    /*
     * If forward cfi enabled for new priv, restore elp status
     * and clear mnpelp in mnstatus
     */
    if (cpu_get_fcfien(env)) {
        env->elp = get_field(env->mnstatus, MNSTATUS_MNPELP);
    }
    env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, 0);

    return retpc;
}
504 
/* Record a CTR entry for a transfer from @src to @dest of kind @type,
 * attributed to the current privilege/virtualization mode. */
void helper_ctr_add_entry(CPURISCVState *env, target_ulong src,
                          target_ulong dest, target_ulong type)
{
    riscv_ctr_add_entry(env, src, dest, (enum CTRType)type,
                        env->priv, env->virt_enabled);
}
511 
/* sctrclr: clear the CTR buffer, after smstateen and privilege checks. */
void helper_ctr_clear(CPURISCVState *env)
{
    /*
     * It's safe to call smstateen_acc_ok() for umode access regardless of the
     * state of bit 54 (CTR bit in case of m/hstateen) of sstateen. If the bit
     * is zero, smstateen_acc_ok() will return the correct exception code and
     * if it's one, smstateen_acc_ok() will return RISCV_EXCP_NONE. In that
     * scenario the U-mode check below will handle that case.
     */
    RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_CTR);
    if (ret != RISCV_EXCP_NONE) {
        riscv_raise_exception(env, ret, GETPC());
    }

    /* sctrclr is never legal from U/VU-mode. */
    if (env->priv == PRV_U) {
        /*
         * One corner case is when sctrclr is executed from VU-mode and
         * mstateen.CTR = 0, in which case we are supposed to raise
         * RISCV_EXCP_ILLEGAL_INST. This case is already handled in
         * smstateen_acc_ok().
         */
        uint32_t excep = env->virt_enabled ? RISCV_EXCP_VIRT_INSTRUCTION_FAULT :
            RISCV_EXCP_ILLEGAL_INST;
        riscv_raise_exception(env, excep, GETPC());
    }

    riscv_ctr_clear(env);
}
540 
/*
 * WFI: halt the hart until an interrupt, unless the instruction is
 * trapped — illegal instruction when mstatus.TW applies (or U-mode
 * with S-mode present, non-virtualized), virtual instruction fault
 * when executed virtualized in VU-mode or in VS-mode with
 * hstatus.VTW set.
 */
void helper_wfi(CPURISCVState *env)
{
    CPUState *cs = env_cpu(env);
    bool rvs = riscv_has_ext(env, RVS);
    bool prv_u = env->priv == PRV_U;
    bool prv_s = env->priv == PRV_S;

    /* mstatus.TW traps WFI below M-mode; U-mode WFI with S present
     * (non-virt) is also illegal. */
    if (((prv_s || (!rvs && prv_u)) && get_field(env->mstatus, MSTATUS_TW)) ||
        (rvs && prv_u && !env->virt_enabled)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    } else if (env->virt_enabled &&
               (prv_u || (prv_s && get_field(env->hstatus, HSTATUS_VTW)))) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    } else {
        /* Actually halt: park the vCPU until an interrupt wakes it. */
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cpu_loop_exit(cs);
    }
}
560 
/*
 * WRS.NTO (Zawrs): we do not implement an actual stall, so this only
 * raises the trap cases — virtual instruction fault when hstatus.VTW
 * applies (and mstatus.TW does not), illegal instruction when
 * mstatus.TW applies below M-mode — and is otherwise a no-op.
 */
void helper_wrs_nto(CPURISCVState *env)
{
    if (env->virt_enabled && (env->priv == PRV_S || env->priv == PRV_U) &&
        get_field(env->hstatus, HSTATUS_VTW) &&
        !get_field(env->mstatus, MSTATUS_TW)) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    } else if (env->priv != PRV_M && get_field(env->mstatus, MSTATUS_TW)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }
}
571 
/*
 * Flush this CPU's TLB unless trapped: mstatus.TVM traps it from
 * (non-virt) S-mode, hstatus.VTVM traps it while virtualized, and
 * U-mode may never execute it.
 */
void helper_tlb_flush(CPURISCVState *env)
{
    CPUState *cs = env_cpu(env);
    if (!env->virt_enabled &&
        (env->priv == PRV_U ||
         (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)))) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    } else if (env->virt_enabled &&
               (env->priv == PRV_U || get_field(env->hstatus, HSTATUS_VTVM))) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    } else {
        tlb_flush(cs);
    }
}
586 
587 void helper_tlb_flush_all(CPURISCVState *env)
588 {
589     CPUState *cs = env_cpu(env);
590     tlb_flush_all_cpus_synced(cs);
591 }
592 
/*
 * Hypervisor TLB flush (hfence.vvma): allowed from M-mode or
 * non-virtualized HS-mode; virtual instruction fault when executed
 * while virtualized, illegal instruction otherwise.
 */
void helper_hyp_tlb_flush(CPURISCVState *env)
{
    CPUState *cs = env_cpu(env);

    if (env->virt_enabled) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
    }

    /*
     * NOTE(review): the !env->virt_enabled term below is always true
     * here, since the virtualized case raised above.
     */
    if (env->priv == PRV_M ||
        (env->priv == PRV_S && !env->virt_enabled)) {
        tlb_flush(cs);
        return;
    }

    riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}
609 
610 void helper_hyp_gvma_tlb_flush(CPURISCVState *env)
611 {
612     if (env->priv == PRV_S && !env->virt_enabled &&
613         get_field(env->mstatus, MSTATUS_TVM)) {
614         riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
615     }
616 
617     helper_hyp_tlb_flush(env);
618 }
619 
/*
 * Validate a hypervisor load/store (HLV/HSV, or HLVX when @x is true)
 * and return the mmu index to use for the two-stage access.  Allowed
 * from M-mode, non-virtualized HS-mode, or U-mode with hstatus.HU set;
 * raises otherwise.
 */
static int check_access_hlsv(CPURISCVState *env, bool x, uintptr_t ra)
{
    if (env->priv == PRV_M) {
        /* always allowed */
    } else if (env->virt_enabled) {
        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, ra);
    } else if (env->priv == PRV_U && !get_field(env->hstatus, HSTATUS_HU)) {
        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra);
    }

    /* The access executes at the privilege recorded in hstatus.SPVP. */
    int mode = get_field(env->hstatus, HSTATUS_SPVP);
    if (!x && mode == PRV_S && get_field(env->vsstatus, MSTATUS_SUM)) {
        /* Data access from S with vsstatus.SUM set: permit user pages. */
        mode = MMUIdx_S_SUM;
    }
    return mode | MMU_2STAGE_BIT;
}
636 
637 target_ulong helper_hyp_hlv_bu(CPURISCVState *env, target_ulong addr)
638 {
639     uintptr_t ra = GETPC();
640     int mmu_idx = check_access_hlsv(env, false, ra);
641     MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
642 
643     return cpu_ldb_mmu(env, adjust_addr_virt(env, addr), oi, ra);
644 }
645 
646 target_ulong helper_hyp_hlv_hu(CPURISCVState *env, target_ulong addr)
647 {
648     uintptr_t ra = GETPC();
649     int mmu_idx = check_access_hlsv(env, false, ra);
650     MemOpIdx oi = make_memop_idx(mo_endian_env(env) | MO_UW, mmu_idx);
651 
652     return cpu_ldw_mmu(env, adjust_addr_virt(env, addr), oi, ra);
653 }
654 
655 target_ulong helper_hyp_hlv_wu(CPURISCVState *env, target_ulong addr)
656 {
657     uintptr_t ra = GETPC();
658     int mmu_idx = check_access_hlsv(env, false, ra);
659     MemOpIdx oi = make_memop_idx(mo_endian_env(env) | MO_UL, mmu_idx);
660 
661     return cpu_ldl_mmu(env, adjust_addr_virt(env, addr), oi, ra);
662 }
663 
664 target_ulong helper_hyp_hlv_d(CPURISCVState *env, target_ulong addr)
665 {
666     uintptr_t ra = GETPC();
667     int mmu_idx = check_access_hlsv(env, false, ra);
668     MemOpIdx oi = make_memop_idx(mo_endian_env(env) | MO_UQ, mmu_idx);
669 
670     return cpu_ldq_mmu(env, adjust_addr_virt(env, addr), oi, ra);
671 }
672 
673 void helper_hyp_hsv_b(CPURISCVState *env, target_ulong addr, target_ulong val)
674 {
675     uintptr_t ra = GETPC();
676     int mmu_idx = check_access_hlsv(env, false, ra);
677     MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
678 
679     cpu_stb_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
680 }
681 
682 void helper_hyp_hsv_h(CPURISCVState *env, target_ulong addr, target_ulong val)
683 {
684     uintptr_t ra = GETPC();
685     int mmu_idx = check_access_hlsv(env, false, ra);
686     MemOpIdx oi = make_memop_idx(mo_endian_env(env) | MO_UW, mmu_idx);
687 
688     cpu_stw_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
689 }
690 
691 void helper_hyp_hsv_w(CPURISCVState *env, target_ulong addr, target_ulong val)
692 {
693     uintptr_t ra = GETPC();
694     int mmu_idx = check_access_hlsv(env, false, ra);
695     MemOpIdx oi = make_memop_idx(mo_endian_env(env) | MO_UL, mmu_idx);
696 
697     cpu_stl_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
698 }
699 
700 void helper_hyp_hsv_d(CPURISCVState *env, target_ulong addr, target_ulong val)
701 {
702     uintptr_t ra = GETPC();
703     int mmu_idx = check_access_hlsv(env, false, ra);
704     MemOpIdx oi = make_memop_idx(mo_endian_env(env) | MO_UQ, mmu_idx);
705 
706     cpu_stq_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
707 }
708 
709 /*
710  * TODO: These implementations are not quite correct.  They perform the
711  * access using execute permission just fine, but the final PMP check
712  * is supposed to have read permission as well.  Without replicating
713  * a fair fraction of cputlb.c, fixing this requires adding new mmu_idx
714  * which would imply that exact check in tlb_fill.
715  */
716 target_ulong helper_hyp_hlvx_hu(CPURISCVState *env, target_ulong addr)
717 {
718     uintptr_t ra = GETPC();
719     int mmu_idx = check_access_hlsv(env, true, ra);
720     MemOpIdx oi = make_memop_idx(mo_endian_env(env) | MO_UW, mmu_idx);
721 
722     return cpu_ldw_code_mmu(env, addr, oi, GETPC());
723 }
724 
725 target_ulong helper_hyp_hlvx_wu(CPURISCVState *env, target_ulong addr)
726 {
727     uintptr_t ra = GETPC();
728     int mmu_idx = check_access_hlsv(env, true, ra);
729     MemOpIdx oi = make_memop_idx(mo_endian_env(env) | MO_UL, mmu_idx);
730 
731     return cpu_ldl_code_mmu(env, addr, oi, ra);
732 }
733 
/*
 * Raise the appropriate exception for an ssamoswap* instruction that
 * was found to be disabled during translation.  Never returns.
 */
void helper_ssamoswap_disabled(CPURISCVState *env)
{
    int exception = RISCV_EXCP_ILLEGAL_INST;

    /*
     * Here we follow the RISC-V CFI spec [1] to implement the exception type
     * of ssamoswap* instruction.
     *
     * [1] RISC-V CFI spec v1.0, ch2.7 Atomic Swap from a Shadow Stack Location
     *
     * Note: We have already checked some conditions in trans_* functions:
     *   1. The effective priv mode is not M-mode.
     *   2. The xSSE specific to the effective priv mode is disabled.
     */
    if (!get_field(env->menvcfg, MENVCFG_SSE)) {
        /*
         * Disabled M-mode SSE always trigger illegal instruction when
         * current priv mode is not M-mode.
         */
        exception = RISCV_EXCP_ILLEGAL_INST;
        goto done;
    }

    if (!riscv_has_ext(env, RVS)) {
        /* S-mode is not implemented */
        exception = RISCV_EXCP_ILLEGAL_INST;
        goto done;
    } else if (env->virt_enabled) {
        /*
         * VU/VS-mode with disabled xSSE will trigger the virtual instruction
         * exception.
         */
        exception = RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        goto done;
    } else {
        /*
         * U-mode with disabled S-mode SSE will trigger the illegal instruction
         * exception.
         *
         * Note: S-mode is already handled in the disabled M-mode SSE case.
         */
        exception = RISCV_EXCP_ILLEGAL_INST;
        goto done;
    }

done:
    riscv_raise_exception(env, exception, GETPC());
}
782 
783 #endif /* !CONFIG_USER_ONLY */
784