1 /*
2 * RISC-V CPU helpers for qemu.
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/main-loop.h"
23 #include "cpu.h"
24 #include "internals.h"
25 #include "pmu.h"
26 #include "exec/cputlb.h"
27 #include "exec/exec-all.h"
28 #include "exec/page-protection.h"
29 #include "instmap.h"
30 #include "tcg/tcg-op.h"
31 #include "accel/tcg/cpu-ops.h"
32 #include "trace.h"
33 #include "semihosting/common-semi.h"
34 #include "system/cpu-timers.h"
35 #include "cpu_bits.h"
36 #include "debug.h"
37 #include "pmp.h"
38
39 int riscv_env_mmu_index(CPURISCVState *env, bool ifetch)
40 {
41 #ifdef CONFIG_USER_ONLY
42 return 0;
43 #else
44 bool virt = env->virt_enabled;
45 int mode = env->priv;
46
47 /* All priv -> mmu_idx mappings are here */
48 if (!ifetch) {
49 uint64_t status = env->mstatus;
50
51 if (mode == PRV_M && get_field(status, MSTATUS_MPRV)) {
52 mode = get_field(env->mstatus, MSTATUS_MPP);
53 virt = get_field(env->mstatus, MSTATUS_MPV) &&
54 (mode != PRV_M);
55 if (virt) {
56 status = env->vsstatus;
57 }
58 }
59 if (mode == PRV_S && get_field(status, MSTATUS_SUM)) {
60 mode = MMUIdx_S_SUM;
61 }
62 }
63
64 return mode | (virt ? MMU_2STAGE_BIT : 0);
65 #endif
66 }
67
68 bool cpu_get_fcfien(CPURISCVState *env)
69 {
70 /* no cfi extension, return false */
71 if (!env_archcpu(env)->cfg.ext_zicfilp) {
72 return false;
73 }
74
75 switch (env->priv) {
76 case PRV_U:
77 if (riscv_has_ext(env, RVS)) {
78 return env->senvcfg & SENVCFG_LPE;
79 }
80 return env->menvcfg & MENVCFG_LPE;
81 #ifndef CONFIG_USER_ONLY
82 case PRV_S:
83 if (env->virt_enabled) {
84 return env->henvcfg & HENVCFG_LPE;
85 }
86 return env->menvcfg & MENVCFG_LPE;
87 case PRV_M:
88 return env->mseccfg & MSECCFG_MLPE;
89 #endif
90 default:
91 g_assert_not_reached();
92 }
93 }
94
95 bool cpu_get_bcfien(CPURISCVState *env)
96 {
97 /* no cfi extension, return false */
98 if (!env_archcpu(env)->cfg.ext_zicfiss) {
99 return false;
100 }
101
102 switch (env->priv) {
103 case PRV_U:
104 /*
105 * If S is not implemented then the shadow stack for U can't be turned on.
106 * This is checked in `riscv_cpu_validate_set_extensions`, so there is no
107 * need to check or assert here.
108 */
109 return env->senvcfg & SENVCFG_SSE;
110 #ifndef CONFIG_USER_ONLY
111 case PRV_S:
112 if (env->virt_enabled) {
113 return env->henvcfg & HENVCFG_SSE;
114 }
115 return env->menvcfg & MENVCFG_SSE;
116 case PRV_M: /* M-mode shadow stack is always off */
117 return false;
118 #endif
119 default:
120 g_assert_not_reached();
121 }
122 }
123
124 bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt)
125 {
126 #ifdef CONFIG_USER_ONLY
127 return false;
128 #else
129 if (virt) {
130 return (env->henvcfg & HENVCFG_DTE) != 0;
131 } else {
132 return (env->menvcfg & MENVCFG_DTE) != 0;
133 }
134 #endif
135 }
136
137 void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
138 uint64_t *cs_base, uint32_t *pflags)
139 {
140 RISCVCPU *cpu = env_archcpu(env);
141 RISCVExtStatus fs, vs;
142 uint32_t flags = 0;
143 bool pm_signext = riscv_cpu_virt_mem_enabled(env);
144
145 *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
146 *cs_base = 0;
147
148 if (cpu->cfg.ext_zve32x) {
149 /*
150 * If env->vl equals VLMAX, we can use the generic vector operation
151 * expanders (GVEC) to accelerate the vector operations.
152 * However, as LMUL can be a fractional number, the maximum
153 * vector size that can be operated on might be less than 8 bytes,
154 * which is not supported by GVEC. So we set the vl_eq_vlmax flag to true
155 * only when maxsz >= 8 bytes.
156 */
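/*
 * Illustrative example (not normative): with vlenb = 16 (VLEN = 128),
 * vsew = 2 (SEW = 32) and LMUL = 1/4, vlmax is 1 and maxsz is 4 bytes,
 * so vl_eq_vlmax stays false even when env->vl == vlmax.
 */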
157
158 /* lmul encoded as in DisasContext::lmul */
159 int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
160 uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
161 uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
162 uint32_t maxsz = vlmax << vsew;
163 bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
164 (maxsz >= 8);
165 flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
166 flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
167 flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
168 FIELD_EX64(env->vtype, VTYPE, VLMUL));
169 flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
170 flags = FIELD_DP32(flags, TB_FLAGS, VTA,
171 FIELD_EX64(env->vtype, VTYPE, VTA));
172 flags = FIELD_DP32(flags, TB_FLAGS, VMA,
173 FIELD_EX64(env->vtype, VTYPE, VMA));
174 flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
175 } else {
176 flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
177 }
178
179 if (cpu_get_fcfien(env)) {
180 /*
181 * For Forward CFI, only the expectation of an lpad at
182 * the start of the block is tracked via env->elp. env->elp
183 * is turned on during jalr translation.
184 */
185 flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp);
186 flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1);
187 }
188
189 if (cpu_get_bcfien(env)) {
190 flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1);
191 }
192
193 #ifdef CONFIG_USER_ONLY
194 fs = EXT_STATUS_DIRTY;
195 vs = EXT_STATUS_DIRTY;
196 #else
197 flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);
198
199 flags |= riscv_env_mmu_index(env, 0);
200 fs = get_field(env->mstatus, MSTATUS_FS);
201 vs = get_field(env->mstatus, MSTATUS_VS);
202
203 if (env->virt_enabled) {
204 flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
205 /*
206 * Merge DISABLED and !DIRTY states using MIN.
207 * We will set both fields when dirtying.
208 */
209 fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
210 vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
211 }
212
213 /* With Zfinx, floating point is enabled/disabled by Smstateen. */
214 if (!riscv_has_ext(env, RVF)) {
215 fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
216 ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
217 }
218
219 if (cpu->cfg.debug && !icount_enabled()) {
220 flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
221 }
222 #endif
223
224 flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
225 flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
226 flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
227 flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
228 flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env));
229 flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext);
230
231 *pflags = flags;
232 }
233
234 RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env)
235 {
236 #ifndef CONFIG_USER_ONLY
237 int priv_mode = cpu_address_mode(env);
238
239 if (get_field(env->mstatus, MSTATUS_MPRV) &&
240 get_field(env->mstatus, MSTATUS_MXR)) {
241 return PMM_FIELD_DISABLED;
242 }
243
244 /* Get current PMM field */
245 switch (priv_mode) {
246 case PRV_M:
247 if (riscv_cpu_cfg(env)->ext_smmpm) {
248 return get_field(env->mseccfg, MSECCFG_PMM);
249 }
250 break;
251 case PRV_S:
252 if (riscv_cpu_cfg(env)->ext_smnpm) {
253 if (get_field(env->mstatus, MSTATUS_MPV)) {
254 return get_field(env->henvcfg, HENVCFG_PMM);
255 } else {
256 return get_field(env->menvcfg, MENVCFG_PMM);
257 }
258 }
259 break;
260 case PRV_U:
261 if (riscv_has_ext(env, RVS)) {
262 if (riscv_cpu_cfg(env)->ext_ssnpm) {
263 return get_field(env->senvcfg, SENVCFG_PMM);
264 }
265 } else {
266 if (riscv_cpu_cfg(env)->ext_smnpm) {
267 return get_field(env->menvcfg, MENVCFG_PMM);
268 }
269 }
270 break;
271 default:
272 g_assert_not_reached();
273 }
274 return PMM_FIELD_DISABLED;
275 #else
276 return PMM_FIELD_DISABLED;
277 #endif
278 }
279
280 RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env)
281 {
282 #ifndef CONFIG_USER_ONLY
283 int priv_mode = cpu_address_mode(env);
284
285 if (priv_mode == PRV_U) {
286 return get_field(env->hstatus, HSTATUS_HUPMM);
287 } else {
288 if (get_field(env->hstatus, HSTATUS_SPVP)) {
289 return get_field(env->henvcfg, HENVCFG_PMM);
290 } else {
291 return get_field(env->senvcfg, SENVCFG_PMM);
292 }
293 }
294 #else
295 return PMM_FIELD_DISABLED;
296 #endif
297 }
298
299 bool riscv_cpu_virt_mem_enabled(CPURISCVState *env)
300 {
301 #ifndef CONFIG_USER_ONLY
302 int satp_mode = 0;
303 int priv_mode = cpu_address_mode(env);
304
305 if (riscv_cpu_mxl(env) == MXL_RV32) {
306 satp_mode = get_field(env->satp, SATP32_MODE);
307 } else {
308 satp_mode = get_field(env->satp, SATP64_MODE);
309 }
310
311 return ((satp_mode != VM_1_10_MBARE) && (priv_mode != PRV_M));
312 #else
313 return false;
314 #endif
315 }
316
317 uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm)
318 {
319 switch (pmm) {
320 case PMM_FIELD_DISABLED:
321 return 0;
322 case PMM_FIELD_PMLEN7:
323 return 7;
324 case PMM_FIELD_PMLEN16:
325 return 16;
326 default:
327 g_assert_not_reached();
328 }
329 }
330
331 #ifndef CONFIG_USER_ONLY
332
333 /*
334 * The HS-mode is allowed to configure priority only for the
335 * following VS-mode local interrupts:
336 *
337 * 0 (Reserved interrupt, reads as zero)
338 * 1 Supervisor software interrupt
339 * 4 (Reserved interrupt, reads as zero)
340 * 5 Supervisor timer interrupt
341 * 8 (Reserved interrupt, reads as zero)
342 * 13 (Reserved interrupt)
343 * 14 "
344 * 15 "
345 * 16 "
346 * 17 "
347 * 18 "
348 * 19 "
349 * 20 "
350 * 21 "
351 * 22 "
352 * 23 "
353 */
354
355 static const int hviprio_index2irq[] = {
356 0, 1, 4, 5, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 };
357 static const int hviprio_index2rdzero[] = {
358 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
359
360 int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero)
361 {
362 if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) {
363 return -EINVAL;
364 }
365
366 if (out_irq) {
367 *out_irq = hviprio_index2irq[index];
368 }
369
370 if (out_rdzero) {
371 *out_rdzero = hviprio_index2rdzero[index];
372 }
373
374 return 0;
375 }
376
377 /*
378 * Default priorities of local interrupts are defined in the
379 * RISC-V Advanced Interrupt Architecture specification.
380 *
381 * ----------------------------------------------------------------
382 * Default |
383 * Priority | Major Interrupt Numbers
384 * ----------------------------------------------------------------
385 * Highest | 47, 23, 46, 45, 22, 44,
386 * | 43, 21, 42, 41, 20, 40
387 * |
388 * | 11 (0b), 3 (03), 7 (07)
389 * | 9 (09), 1 (01), 5 (05)
390 * | 12 (0c)
391 * | 10 (0a), 2 (02), 6 (06)
392 * |
393 * | 39, 19, 38, 37, 18, 36,
394 * Lowest | 35, 17, 34, 33, 16, 32
395 * ----------------------------------------------------------------
396 */
397 static const uint8_t default_iprio[64] = {
398 /* Custom interrupts 48 to 63 */
399 [63] = IPRIO_MMAXIPRIO,
400 [62] = IPRIO_MMAXIPRIO,
401 [61] = IPRIO_MMAXIPRIO,
402 [60] = IPRIO_MMAXIPRIO,
403 [59] = IPRIO_MMAXIPRIO,
404 [58] = IPRIO_MMAXIPRIO,
405 [57] = IPRIO_MMAXIPRIO,
406 [56] = IPRIO_MMAXIPRIO,
407 [55] = IPRIO_MMAXIPRIO,
408 [54] = IPRIO_MMAXIPRIO,
409 [53] = IPRIO_MMAXIPRIO,
410 [52] = IPRIO_MMAXIPRIO,
411 [51] = IPRIO_MMAXIPRIO,
412 [50] = IPRIO_MMAXIPRIO,
413 [49] = IPRIO_MMAXIPRIO,
414 [48] = IPRIO_MMAXIPRIO,
415
416 /* Custom interrupts 24 to 31 */
417 [31] = IPRIO_MMAXIPRIO,
418 [30] = IPRIO_MMAXIPRIO,
419 [29] = IPRIO_MMAXIPRIO,
420 [28] = IPRIO_MMAXIPRIO,
421 [27] = IPRIO_MMAXIPRIO,
422 [26] = IPRIO_MMAXIPRIO,
423 [25] = IPRIO_MMAXIPRIO,
424 [24] = IPRIO_MMAXIPRIO,
425
426 [47] = IPRIO_DEFAULT_UPPER,
427 [23] = IPRIO_DEFAULT_UPPER + 1,
428 [46] = IPRIO_DEFAULT_UPPER + 2,
429 [45] = IPRIO_DEFAULT_UPPER + 3,
430 [22] = IPRIO_DEFAULT_UPPER + 4,
431 [44] = IPRIO_DEFAULT_UPPER + 5,
432
433 [43] = IPRIO_DEFAULT_UPPER + 6,
434 [21] = IPRIO_DEFAULT_UPPER + 7,
435 [42] = IPRIO_DEFAULT_UPPER + 8,
436 [41] = IPRIO_DEFAULT_UPPER + 9,
437 [20] = IPRIO_DEFAULT_UPPER + 10,
438 [40] = IPRIO_DEFAULT_UPPER + 11,
439
440 [11] = IPRIO_DEFAULT_M,
441 [3] = IPRIO_DEFAULT_M + 1,
442 [7] = IPRIO_DEFAULT_M + 2,
443
444 [9] = IPRIO_DEFAULT_S,
445 [1] = IPRIO_DEFAULT_S + 1,
446 [5] = IPRIO_DEFAULT_S + 2,
447
448 [12] = IPRIO_DEFAULT_SGEXT,
449
450 [10] = IPRIO_DEFAULT_VS,
451 [2] = IPRIO_DEFAULT_VS + 1,
452 [6] = IPRIO_DEFAULT_VS + 2,
453
454 [39] = IPRIO_DEFAULT_LOWER,
455 [19] = IPRIO_DEFAULT_LOWER + 1,
456 [38] = IPRIO_DEFAULT_LOWER + 2,
457 [37] = IPRIO_DEFAULT_LOWER + 3,
458 [18] = IPRIO_DEFAULT_LOWER + 4,
459 [36] = IPRIO_DEFAULT_LOWER + 5,
460
461 [35] = IPRIO_DEFAULT_LOWER + 6,
462 [17] = IPRIO_DEFAULT_LOWER + 7,
463 [34] = IPRIO_DEFAULT_LOWER + 8,
464 [33] = IPRIO_DEFAULT_LOWER + 9,
465 [16] = IPRIO_DEFAULT_LOWER + 10,
466 [32] = IPRIO_DEFAULT_LOWER + 11,
467 };
468
469 uint8_t riscv_cpu_default_priority(int irq)
470 {
471 if (irq < 0 || irq > 63) {
472 return IPRIO_MMAXIPRIO;
473 }
474
475 return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO;
476 };
477
478 static int riscv_cpu_pending_to_irq(CPURISCVState *env,
479 int extirq, unsigned int extirq_def_prio,
480 uint64_t pending, uint8_t *iprio)
481 {
482 int irq, best_irq = RISCV_EXCP_NONE;
483 unsigned int prio, best_prio = UINT_MAX;
484
485 if (!pending) {
486 return RISCV_EXCP_NONE;
487 }
488
489 irq = ctz64(pending);
490 if (!((extirq == IRQ_M_EXT) ? riscv_cpu_cfg(env)->ext_smaia :
491 riscv_cpu_cfg(env)->ext_ssaia)) {
492 return irq;
493 }
494
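/*
 * AIA is enabled: scan the remaining pending bits and keep the interrupt
 * whose effective priority value is lowest (a lower value means a higher
 * priority). Unconfigured priorities (iprio == 0) fall back to the default
 * priority ordering relative to the external interrupt.
 */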
495 pending = pending >> irq;
496 while (pending) {
497 prio = iprio[irq];
498 if (!prio) {
499 if (irq == extirq) {
500 prio = extirq_def_prio;
501 } else {
502 prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ?
503 1 : IPRIO_MMAXIPRIO;
504 }
505 }
506 if ((pending & 0x1) && (prio <= best_prio)) {
507 best_irq = irq;
508 best_prio = prio;
509 }
510 irq++;
511 pending = pending >> 1;
512 }
513
514 return best_irq;
515 }
516
517 /*
518 * Doesn't report interrupts inserted using mvip from M-mode firmware or
519 * using hvip bits 13:63 from HS-mode. Those are returned in
520 * riscv_cpu_sirq_pending() and riscv_cpu_vsirq_pending().
521 */
522 uint64_t riscv_cpu_all_pending(CPURISCVState *env)
523 {
524 uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN);
525 uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
526 uint64_t vstip = (env->vstime_irq) ? MIP_VSTIP : 0;
527
528 return (env->mip | vsgein | vstip) & env->mie;
529 }
530
531 int riscv_cpu_mirq_pending(CPURISCVState *env)
532 {
533 uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg &
534 ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
535
536 return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
537 irqs, env->miprio);
538 }
539
540 int riscv_cpu_sirq_pending(CPURISCVState *env)
541 {
542 uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
543 ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
544 uint64_t irqs_f = env->mvip & env->mvien & ~env->mideleg & env->sie;
545
546 return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
547 irqs | irqs_f, env->siprio);
548 }
549
550 int riscv_cpu_vsirq_pending(CPURISCVState *env)
551 {
552 uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg & env->hideleg;
553 uint64_t irqs_f_vs = env->hvip & env->hvien & ~env->hideleg & env->vsie;
554 uint64_t vsbits;
555
556 /* Bring VS-level bits to correct position */
557 vsbits = irqs & VS_MODE_INTERRUPTS;
558 irqs &= ~VS_MODE_INTERRUPTS;
559 irqs |= vsbits >> 1;
560
561 return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
562 (irqs | irqs_f_vs), env->hviprio);
563 }
564
565 static int riscv_cpu_local_irq_pending(CPURISCVState *env)
566 {
567 uint64_t irqs, pending, mie, hsie, vsie, irqs_f, irqs_f_vs;
568 uint64_t vsbits, irq_delegated;
569 int virq;
570
571 /* Priority: RNMI > Other interrupt. */
572 if (riscv_cpu_cfg(env)->ext_smrnmi) {
573 /* If mnstatus.NMIE == 0, all interrupts are disabled. */
574 if (!get_field(env->mnstatus, MNSTATUS_NMIE)) {
575 return RISCV_EXCP_NONE;
576 }
577
578 if (env->rnmip) {
579 return ctz64(env->rnmip); /* since non-zero */
580 }
581 }
582
583 /* Determine interrupt enable state of all privilege modes */
584 if (env->virt_enabled) {
585 mie = 1;
586 hsie = 1;
587 vsie = (env->priv < PRV_S) ||
588 (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
589 } else {
590 mie = (env->priv < PRV_M) ||
591 (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE));
592 hsie = (env->priv < PRV_S) ||
593 (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
594 vsie = 0;
595 }
596
597 /* Determine all pending interrupts */
598 pending = riscv_cpu_all_pending(env);
599
600 /* Check M-mode interrupts */
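/* mie, hsie and vsie hold 0 or 1, so negating them below yields either 0 or an all-ones mask. */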
601 irqs = pending & ~env->mideleg & -mie;
602 if (irqs) {
603 return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
604 irqs, env->miprio);
605 }
606
607 /* Check for virtual S-mode interrupts. */
608 irqs_f = env->mvip & (env->mvien & ~env->mideleg) & env->sie;
609
610 /* Check HS-mode interrupts */
611 irqs = ((pending & env->mideleg & ~env->hideleg) | irqs_f) & -hsie;
612 if (irqs) {
613 return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
614 irqs, env->siprio);
615 }
616
617 /* Check for virtual VS-mode interrupts. */
618 irqs_f_vs = env->hvip & env->hvien & ~env->hideleg & env->vsie;
619
620 /* Check VS-mode interrupts */
621 irq_delegated = pending & env->mideleg & env->hideleg;
622
623 /* Bring VS-level bits to correct position */
624 vsbits = irq_delegated & VS_MODE_INTERRUPTS;
625 irq_delegated &= ~VS_MODE_INTERRUPTS;
626 irq_delegated |= vsbits >> 1;
627
628 irqs = (irq_delegated | irqs_f_vs) & -vsie;
629 if (irqs) {
630 virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
631 irqs, env->hviprio);
632 if (virq <= 0 || (virq > 12 && virq <= 63)) {
633 return virq;
634 } else {
635 return virq + 1;
636 }
637 }
638
639 /* Indicate no pending interrupt */
640 return RISCV_EXCP_NONE;
641 }
642
643 bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
644 {
645 uint32_t mask = CPU_INTERRUPT_HARD | CPU_INTERRUPT_RNMI;
646
647 if (interrupt_request & mask) {
648 RISCVCPU *cpu = RISCV_CPU(cs);
649 CPURISCVState *env = &cpu->env;
650 int interruptno = riscv_cpu_local_irq_pending(env);
651 if (interruptno >= 0) {
652 cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
653 riscv_cpu_do_interrupt(cs);
654 return true;
655 }
656 }
657 return false;
658 }
659
660 /* Return true if floating point support is currently enabled */
661 bool riscv_cpu_fp_enabled(CPURISCVState *env)
662 {
663 if (env->mstatus & MSTATUS_FS) {
664 if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_FS)) {
665 return false;
666 }
667 return true;
668 }
669
670 return false;
671 }
672
673 /* Return true if vector support is currently enabled */
674 bool riscv_cpu_vector_enabled(CPURISCVState *env)
675 {
676 if (env->mstatus & MSTATUS_VS) {
677 if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_VS)) {
678 return false;
679 }
680 return true;
681 }
682
683 return false;
684 }
685
686 void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
687 {
688 uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
689 MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
690 MSTATUS64_UXL | MSTATUS_VS;
691
692 if (riscv_has_ext(env, RVF)) {
693 mstatus_mask |= MSTATUS_FS;
694 }
695 bool current_virt = env->virt_enabled;
696
697 /*
698 * If the zicfilp extension is available and henvcfg.LPE = 1,
699 * then apply the SPELP mask to mstatus.
700 */
701 if (env_archcpu(env)->cfg.ext_zicfilp &&
702 get_field(env->henvcfg, HENVCFG_LPE)) {
703 mstatus_mask |= SSTATUS_SPELP;
704 }
705
706 g_assert(riscv_has_ext(env, RVH));
707
708 if (riscv_env_smode_dbltrp_enabled(env, current_virt)) {
709 mstatus_mask |= MSTATUS_SDT;
710 }
711
712 if (current_virt) {
713 /* Current V=1 and we are about to change to V=0 */
714 env->vsstatus = env->mstatus & mstatus_mask;
715 env->mstatus &= ~mstatus_mask;
716 env->mstatus |= env->mstatus_hs;
717
718 env->vstvec = env->stvec;
719 env->stvec = env->stvec_hs;
720
721 env->vsscratch = env->sscratch;
722 env->sscratch = env->sscratch_hs;
723
724 env->vsepc = env->sepc;
725 env->sepc = env->sepc_hs;
726
727 env->vscause = env->scause;
728 env->scause = env->scause_hs;
729
730 env->vstval = env->stval;
731 env->stval = env->stval_hs;
732
733 env->vsatp = env->satp;
734 env->satp = env->satp_hs;
735 } else {
736 /* Current V=0 and we are about to change to V=1 */
737 env->mstatus_hs = env->mstatus & mstatus_mask;
738 env->mstatus &= ~mstatus_mask;
739 env->mstatus |= env->vsstatus;
740
741 env->stvec_hs = env->stvec;
742 env->stvec = env->vstvec;
743
744 env->sscratch_hs = env->sscratch;
745 env->sscratch = env->vsscratch;
746
747 env->sepc_hs = env->sepc;
748 env->sepc = env->vsepc;
749
750 env->scause_hs = env->scause;
751 env->scause = env->vscause;
752
753 env->stval_hs = env->stval;
754 env->stval = env->vstval;
755
756 env->satp_hs = env->satp;
757 env->satp = env->vsatp;
758 }
759 }
760
761 target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
762 {
763 if (!riscv_has_ext(env, RVH)) {
764 return 0;
765 }
766
767 return env->geilen;
768 }
769
770 void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
771 {
772 if (!riscv_has_ext(env, RVH)) {
773 return;
774 }
775
776 if (geilen > (TARGET_LONG_BITS - 1)) {
777 return;
778 }
779
780 env->geilen = geilen;
781 }
782
783 void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level)
784 {
785 CPURISCVState *env = &cpu->env;
786 CPUState *cs = CPU(cpu);
787 bool release_lock = false;
788
789 if (!bql_locked()) {
790 release_lock = true;
791 bql_lock();
792 }
793
794 if (level) {
795 env->rnmip |= 1 << irq;
796 cpu_interrupt(cs, CPU_INTERRUPT_RNMI);
797 } else {
798 env->rnmip &= ~(1 << irq);
799 cpu_reset_interrupt(cs, CPU_INTERRUPT_RNMI);
800 }
801
802 if (release_lock) {
803 bql_unlock();
804 }
805 }
806
807 int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
808 {
809 CPURISCVState *env = &cpu->env;
810 if (env->miclaim & interrupts) {
811 return -1;
812 } else {
813 env->miclaim |= interrupts;
814 return 0;
815 }
816 }
817
818 void riscv_cpu_interrupt(CPURISCVState *env)
819 {
820 uint64_t gein, vsgein = 0, vstip = 0, irqf = 0;
821 CPUState *cs = env_cpu(env);
822
823 BQL_LOCK_GUARD();
824
825 if (env->virt_enabled) {
826 gein = get_field(env->hstatus, HSTATUS_VGEIN);
827 vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
828 irqf = env->hvien & env->hvip & env->vsie;
829 } else {
830 irqf = env->mvien & env->mvip & env->sie;
831 }
832
833 vstip = env->vstime_irq ? MIP_VSTIP : 0;
834
835 if (env->mip | vsgein | vstip | irqf) {
836 cpu_interrupt(cs, CPU_INTERRUPT_HARD);
837 } else {
838 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
839 }
840 }
841
842 uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask, uint64_t value)
843 {
844 uint64_t old = env->mip;
845
846 /* No need to update mip for VSTIP */
847 mask = ((mask == MIP_VSTIP) && env->vstime_irq) ? 0 : mask;
848
849 BQL_LOCK_GUARD();
850
851 env->mip = (env->mip & ~mask) | (value & mask);
852
853 riscv_cpu_interrupt(env);
854
855 return old;
856 }
857
858 void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
859 void *arg)
860 {
861 env->rdtime_fn = fn;
862 env->rdtime_fn_arg = arg;
863 }
864
865 void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
866 int (*rmw_fn)(void *arg,
867 target_ulong reg,
868 target_ulong *val,
869 target_ulong new_val,
870 target_ulong write_mask),
871 void *rmw_fn_arg)
872 {
873 if (priv <= PRV_M) {
874 env->aia_ireg_rmw_fn[priv] = rmw_fn;
875 env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg;
876 }
877 }
878
879 static void riscv_ctr_freeze(CPURISCVState *env, uint64_t freeze_mask,
880 bool virt)
881 {
882 uint64_t ctl = virt ? env->vsctrctl : env->mctrctl;
883
884 assert((freeze_mask & (~(XCTRCTL_BPFRZ | XCTRCTL_LCOFIFRZ))) == 0);
885
886 if (ctl & freeze_mask) {
887 env->sctrstatus |= SCTRSTATUS_FROZEN;
888 }
889 }
890
891 void riscv_ctr_clear(CPURISCVState *env)
892 {
893 memset(env->ctr_src, 0x0, sizeof(env->ctr_src));
894 memset(env->ctr_dst, 0x0, sizeof(env->ctr_dst));
895 memset(env->ctr_data, 0x0, sizeof(env->ctr_data));
896 }
897
898 static uint64_t riscv_ctr_priv_to_mask(target_ulong priv, bool virt)
899 {
900 switch (priv) {
901 case PRV_M:
902 return MCTRCTL_M;
903 case PRV_S:
904 if (virt) {
905 return XCTRCTL_S;
906 }
907 return XCTRCTL_S;
908 case PRV_U:
909 if (virt) {
910 return XCTRCTL_U;
911 }
912 return XCTRCTL_U;
913 }
914
915 g_assert_not_reached();
916 }
917
918 static uint64_t riscv_ctr_get_control(CPURISCVState *env, target_long priv,
919 bool virt)
920 {
921 switch (priv) {
922 case PRV_M:
923 return env->mctrctl;
924 case PRV_S:
925 case PRV_U:
926 if (virt) {
927 return env->vsctrctl;
928 }
929 return env->mctrctl;
930 }
931
932 g_assert_not_reached();
933 }
934
935 /*
936 * This function assumes that the src privilege and target privilege are not
937 * the same and that the src privilege is lower than the target privilege.
938 * This includes the virtual state as well.
939 */
940 static bool riscv_ctr_check_xte(CPURISCVState *env, target_long src_prv,
941 bool src_virt)
942 {
943 target_long tgt_prv = env->priv;
944 bool res = true;
945
946 /*
947 * VS and U mode are the same in terms of the xTE bits required to record an
948 * external trap. See 6.1.2. External Traps, table 8 External Trap Enable
949 * Requirements. This changes VS to U to simplify the logic a bit.
950 */
951 if (src_virt && src_prv == PRV_S) {
952 src_prv = PRV_U;
953 } else if (env->virt_enabled && tgt_prv == PRV_S) {
954 tgt_prv = PRV_U;
955 }
956
957 /* VU mode is an outlier here. */
958 if (src_virt && src_prv == PRV_U) {
959 res &= !!(env->vsctrctl & XCTRCTL_STE);
960 }
961
962 switch (src_prv) {
963 case PRV_U:
964 if (tgt_prv == PRV_U) {
965 break;
966 }
967 res &= !!(env->mctrctl & XCTRCTL_STE);
968 /* fall-through */
969 case PRV_S:
970 if (tgt_prv == PRV_S) {
971 break;
972 }
973 res &= !!(env->mctrctl & MCTRCTL_MTE);
974 /* fall-through */
975 case PRV_M:
976 break;
977 }
978
979 return res;
980 }
981
982 /*
983 * Special cases for traps and trap returns:
984 *
985 * 1- Traps, and trap returns, between enabled modes are recorded as normal.
986 * 2- Traps from an inhibited mode to an enabled mode, and trap returns from an
987 * enabled mode back to an inhibited mode, are partially recorded. In such
988 * cases, the PC from the inhibited mode (source PC for traps, and target PC
989 * for trap returns) is 0.
990 *
991 * 3- Trap returns from an inhibited mode to an enabled mode are not recorded.
992 * Traps from an enabled mode to an inhibited mode, known as external traps,
993 * receive special handling.
994 * By default external traps are not recorded, but a handshake mechanism exists
995 * to allow partial recording. Software running in the target mode of the trap
996 * can opt in to allowing CTR to record traps into that mode even when the mode
997 * is inhibited. The MTE, STE, and VSTE bits allow M-mode, S-mode, and VS-mode,
998 * respectively, to opt in. When an external trap occurs and xTE=1, where
999 * x is the target privilege mode of the trap, CTR records the trap. In such
1000 * cases, the target PC is 0.
1001 */
1002 /*
1003 * CTR arrays are implemented as circular buffers and a new entry is stored at
1004 * sctrstatus.WRPTR, but they are presented to software as moving circular
1005 * buffers. This means software gets the illusion that whenever a new entry
1006 * is added the whole buffer moves by one place, the new entry is added at
1007 * the start (idx 0), and the older entries follow.
1008 *
1009 * Depth = 16.
1010 *
1011 * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
1012 * WRPTR W
1013 * entry 7 6 5 4 3 2 1 0 F E D C B A 9 8
1014 *
1015 * When a new entry is added:
1016 * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
1017 * WRPTR W
1018 * entry 8 7 6 5 4 3 2 1 0 F E D C B A 9
1019 *
1020 * "entry" here denotes the logical entry number that software can access
1021 * using the ctrsource, ctrtarget and ctrdata registers. So xiselect 0x200
1022 * will return entry 0, i.e. buffer[8], and 0x201 will return entry 1, i.e.
1023 * buffer[7]. Here is how we convert an entry to a buffer idx.
1024 *
1025 * entry = isel - CTR_ENTRIES_FIRST;
1026 * idx = (sctrstatus.WRPTR - entry - 1) & (depth - 1);
1027 */
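/*
 * Worked example (illustrative): in the second diagram above, after the new
 * entry is written the WRPTR is 9, so isel = CTR_ENTRIES_FIRST + 1 selects
 * entry 1 and idx = (9 - 1 - 1) & (16 - 1) = 7, i.e. buffer[7].
 */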
1028 void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
1029 enum CTRType type, target_ulong src_priv, bool src_virt)
1030 {
1031 bool tgt_virt = env->virt_enabled;
1032 uint64_t src_mask = riscv_ctr_priv_to_mask(src_priv, src_virt);
1033 uint64_t tgt_mask = riscv_ctr_priv_to_mask(env->priv, tgt_virt);
1034 uint64_t src_ctrl = riscv_ctr_get_control(env, src_priv, src_virt);
1035 uint64_t tgt_ctrl = riscv_ctr_get_control(env, env->priv, tgt_virt);
1036 uint64_t depth, head;
1037 bool ext_trap = false;
1038
1039 /*
1040 * Return immediately if both target and src recording are disabled or if
1041 * CTR is in the frozen state.
1042 */
1043 if ((!(src_ctrl & src_mask) && !(tgt_ctrl & tgt_mask)) ||
1044 env->sctrstatus & SCTRSTATUS_FROZEN) {
1045 return;
1046 }
1047
1048 /*
1049 * With RAS emulation enabled, only allow indirect call, direct call, function
1050 * return and co-routine swap types.
1051 */
1052 if (tgt_ctrl & XCTRCTL_RASEMU &&
1053 type != CTRDATA_TYPE_INDIRECT_CALL &&
1054 type != CTRDATA_TYPE_DIRECT_CALL &&
1055 type != CTRDATA_TYPE_RETURN &&
1056 type != CTRDATA_TYPE_CO_ROUTINE_SWAP) {
1057 return;
1058 }
1059
1060 if (type == CTRDATA_TYPE_EXCEPTION || type == CTRDATA_TYPE_INTERRUPT) {
1061 /* Case 2 for traps. */
1062 if (!(src_ctrl & src_mask)) {
1063 src = 0;
1064 } else if (!(tgt_ctrl & tgt_mask)) {
1065 /* Check if target priv-mode has allowed external trap recording. */
1066 if (!riscv_ctr_check_xte(env, src_priv, src_virt)) {
1067 return;
1068 }
1069
1070 ext_trap = true;
1071 dst = 0;
1072 }
1073 } else if (type == CTRDATA_TYPE_EXCEP_INT_RET) {
1074 /*
1075 * Case 3 for trap returns. Trap returns from inhibited mode are not
1076 * recorded.
1077 */
1078 if (!(src_ctrl & src_mask)) {
1079 return;
1080 }
1081
1082 /* Case 2 for trap returns. */
1083 if (!(tgt_ctrl & tgt_mask)) {
1084 dst = 0;
1085 }
1086 }
1087
1088 /* Ignore filters in case of RASEMU mode or External trap. */
1089 if (!(tgt_ctrl & XCTRCTL_RASEMU) && !ext_trap) {
1090 /*
1091 * Check if the specific type is inhibited. The not-taken branch filter is
1092 * an enable bit and needs to be checked separately.
1093 */
1094 bool check = tgt_ctrl & BIT_ULL(type + XCTRCTL_INH_START);
1095 if ((type == CTRDATA_TYPE_NONTAKEN_BRANCH && !check) ||
1096 (type != CTRDATA_TYPE_NONTAKEN_BRANCH && check)) {
1097 return;
1098 }
1099 }
1100
1101 head = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
1102
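/*
 * sctrdepth encodes a depth of 16 * 2^SCTRDEPTH entries; (depth - 1) is
 * used below as the circular buffer index mask.
 */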
1103 depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
1104 if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_RETURN) {
1105 head = (head - 1) & (depth - 1);
1106
1107 env->ctr_src[head] &= ~CTRSOURCE_VALID;
1108 env->sctrstatus =
1109 set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
1110 return;
1111 }
1112
1113 /* In case of a co-routine swap we overwrite the latest entry. */
1114 if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_CO_ROUTINE_SWAP) {
1115 head = (head - 1) & (depth - 1);
1116 }
1117
1118 env->ctr_src[head] = src | CTRSOURCE_VALID;
1119 env->ctr_dst[head] = dst & ~CTRTARGET_MISP;
1120 env->ctr_data[head] = set_field(0, CTRDATA_TYPE_MASK, type);
1121
1122 head = (head + 1) & (depth - 1);
1123
1124 env->sctrstatus = set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
1125 }
1126
1127 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
1128 {
1129 g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
1130
1131 if (newpriv != env->priv || env->virt_enabled != virt_en) {
1132 if (icount_enabled()) {
1133 riscv_itrigger_update_priv(env);
1134 }
1135
1136 riscv_pmu_update_fixed_ctrs(env, newpriv, virt_en);
1137 }
1138
1139 /* tlb_flush is unnecessary as mode is contained in mmu_idx */
1140 env->priv = newpriv;
1141 env->xl = cpu_recompute_xl(env);
1142
1143 /*
1144 * Clear the load reservation - otherwise a reservation placed in one
1145 * context/process can be used by another, resulting in an SC succeeding
1146 * incorrectly. Version 2.2 of the ISA specification explicitly requires
1147 * this behaviour, while later revisions say that the kernel "should" use
1148 * an SC instruction to force the yielding of a load reservation on a
1149 * preemptive context switch. As a result, do both.
1150 */
1151 env->load_res = -1;
1152
1153 if (riscv_has_ext(env, RVH)) {
1154 /* Flush the TLB on all virt mode changes. */
1155 if (env->virt_enabled != virt_en) {
1156 tlb_flush(env_cpu(env));
1157 }
1158
1159 env->virt_enabled = virt_en;
1160 if (virt_en) {
1161 /*
1162 * The guest external interrupts from an interrupt controller are
1163 * delivered only when the Guest/VM is running (i.e. V=1). This
1164 * means any guest external interrupt which is triggered while the
1165 * Guest/VM is not running (i.e. V=0) will be missed by QEMU,
1166 * resulting in a guest with sluggish response to serial console
1167 * input and other I/O events.
1168 *
1169 * To solve this, we check and inject interrupt after setting V=1.
1170 */
1171 riscv_cpu_update_mip(env, 0, 0);
1172 }
1173 }
1174 }
1175
1176 /*
1177 * get_physical_address_pmp - check PMP permission for this physical address
1178 *
1179 * Match the PMP region and check permission for this physical address and its
1180 * TLB page. Returns 0 if the permission check was successful.
1181 *
1182 * @env: CPURISCVState
1183 * @prot: The returned protection attributes
1184 * @addr: The physical address whose permission is to be checked
1185 * @access_type: The type of MMU access
1186 * @mode: Indicates current privilege level.
1187 */
1188 static int get_physical_address_pmp(CPURISCVState *env, int *prot, hwaddr addr,
1189 int size, MMUAccessType access_type,
1190 int mode)
1191 {
1192 pmp_priv_t pmp_priv;
1193 bool pmp_has_privs;
1194
1195 if (!riscv_cpu_cfg(env)->pmp) {
1196 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1197 return TRANSLATE_SUCCESS;
1198 }
1199
1200 pmp_has_privs = pmp_hart_has_privs(env, addr, size, 1 << access_type,
1201 &pmp_priv, mode);
1202 if (!pmp_has_privs) {
1203 *prot = 0;
1204 return TRANSLATE_PMP_FAIL;
1205 }
1206
1207 *prot = pmp_priv_to_page_prot(pmp_priv);
1208
1209 return TRANSLATE_SUCCESS;
1210 }
1211
1212 /* Returns 'true' if a svukte address check is needed */
1213 static bool do_svukte_check(CPURISCVState *env, bool first_stage,
1214 int mode, bool virt)
1215 {
1216 /* Svukte extension depends on Sv39. */
1217 if (!(env_archcpu(env)->cfg.ext_svukte ||
1218 !first_stage ||
1219 VM_1_10_SV39 != get_field(env->satp, SATP64_MODE))) {
1220 return false;
1221 }
1222
1223 /*
1224 * Check hstatus.HUKTE if the effective mode is switched to VU-mode by
1225 * executing HLV/HLVX/HSV in U-mode.
1226 * For other cases, check senvcfg.UKTE.
1227 */
1228 if (env->priv == PRV_U && !env->virt_enabled && virt) {
1229 if (!get_field(env->hstatus, HSTATUS_HUKTE)) {
1230 return false;
1231 }
1232 } else if (!get_field(env->senvcfg, SENVCFG_UKTE)) {
1233 return false;
1234 }
1235
1236 /*
1237 * Svukte extension is qualified only in U or VU-mode.
1238 *
1239 * Effective mode can be switched to U or VU-mode by:
1240 * - M-mode + mstatus.MPRV=1 + mstatus.MPP=U-mode.
1241 * - Execute HLV/HLVX/HSV from HS-mode + hstatus.SPVP=0.
1242 * - U-mode.
1243 * - VU-mode.
1244 * - Execute HLV/HLVX/HSV from U-mode + hstatus.HU=1.
1245 */
1246 if (mode != PRV_U) {
1247 return false;
1248 }
1249
1250 return true;
1251 }
1252
1253 static bool check_svukte_addr(CPURISCVState *env, vaddr addr)
1254 {
1255 /* svukte extension excludes RV32 */
1256 uint32_t sxlen = 32 * riscv_cpu_sxl(env);
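/* An address is only accepted if its most-significant bit (bit sxlen - 1) is clear. */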
1257 uint64_t high_bit = addr & (1UL << (sxlen - 1));
1258 return !high_bit;
1259 }
1260
1261 /*
1262 * get_physical_address - get the physical address for this virtual address
1263 *
1264 * Do a page table walk to obtain the physical address corresponding to a
1265 * virtual address. Returns 0 if the translation was successful
1266 *
1267 * Adapted from Spike's mmu_t::translate and mmu_t::walk
1268 *
1269 * @env: CPURISCVState
1270 * @physical: This will be set to the calculated physical address
1271 * @prot: The returned protection attributes
1272 * @addr: The virtual address or guest physical address to be translated
1273 * @fault_pte_addr: If not NULL, this will be set to the fault pte address
1274 * when an error occurs on pte address translation.
1275 * This will already be shifted to match htval.
1276 * @access_type: The type of MMU access
1277 * @mmu_idx: Indicates current privilege level
1278 * @first_stage: Are we in first stage translation?
1279 * Second stage is used for hypervisor guest translation
1280 * @two_stage: Are we going to perform two stage translation
1281 * @is_debug: Is this access from a debugger or the monitor?
1282 */
1283 static int get_physical_address(CPURISCVState *env, hwaddr *physical,
1284 int *ret_prot, vaddr addr,
1285 target_ulong *fault_pte_addr,
1286 int access_type, int mmu_idx,
1287 bool first_stage, bool two_stage,
1288 bool is_debug, bool is_probe)
1289 {
1290 /*
1291 * NOTE: the env->pc value visible here will not be
1292 * correct, but the value visible to the exception handler
1293 * (riscv_cpu_do_interrupt) is correct
1294 */
1295 MemTxResult res;
1296 MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
1297 int mode = mmuidx_priv(mmu_idx);
1298 bool virt = mmuidx_2stage(mmu_idx);
1299 bool use_background = false;
1300 hwaddr ppn;
1301 int napot_bits = 0;
1302 target_ulong napot_mask;
1303 bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE);
1304 bool sstack_page = false;
1305
1306 if (do_svukte_check(env, first_stage, mode, virt) &&
1307 !check_svukte_addr(env, addr)) {
1308 return TRANSLATE_FAIL;
1309 }
1310
1311 /*
1312 * Check if we should use the background registers for the two
1313 * stage translation. We don't need to check if we actually need
1314 * two stage translation as that happened before this function
1315 * was called. Background registers will be used if the guest has
1316 * forced a two stage translation to be on (in HS or M mode).
1317 */
1318 if (!env->virt_enabled && two_stage) {
1319 use_background = true;
1320 }
1321
1322 if (mode == PRV_M || !riscv_cpu_cfg(env)->mmu) {
1323 *physical = addr;
1324 *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1325 return TRANSLATE_SUCCESS;
1326 }
1327
1328 *ret_prot = 0;
1329
1330 hwaddr base;
1331 int levels, ptidxbits, ptesize, vm, widened;
1332
1333 if (first_stage == true) {
1334 if (use_background) {
1335 if (riscv_cpu_mxl(env) == MXL_RV32) {
1336 base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
1337 vm = get_field(env->vsatp, SATP32_MODE);
1338 } else {
1339 base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
1340 vm = get_field(env->vsatp, SATP64_MODE);
1341 }
1342 } else {
1343 if (riscv_cpu_mxl(env) == MXL_RV32) {
1344 base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
1345 vm = get_field(env->satp, SATP32_MODE);
1346 } else {
1347 base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
1348 vm = get_field(env->satp, SATP64_MODE);
1349 }
1350 }
1351 widened = 0;
1352 } else {
1353 if (riscv_cpu_mxl(env) == MXL_RV32) {
1354 base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
1355 vm = get_field(env->hgatp, SATP32_MODE);
1356 } else {
1357 base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
1358 vm = get_field(env->hgatp, SATP64_MODE);
1359 }
1360 widened = 2;
1361 }
1362
1363 switch (vm) {
1364 case VM_1_10_SV32:
1365 levels = 2; ptidxbits = 10; ptesize = 4; break;
1366 case VM_1_10_SV39:
1367 levels = 3; ptidxbits = 9; ptesize = 8; break;
1368 case VM_1_10_SV48:
1369 levels = 4; ptidxbits = 9; ptesize = 8; break;
1370 case VM_1_10_SV57:
1371 levels = 5; ptidxbits = 9; ptesize = 8; break;
1372 case VM_1_10_MBARE:
1373 *physical = addr;
1374 *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1375 return TRANSLATE_SUCCESS;
1376 default:
1377 g_assert_not_reached();
1378 }
1379
1380 CPUState *cs = env_cpu(env);
1381 int va_bits = PGSHIFT + levels * ptidxbits + widened;
1382 int sxlen = 16 << riscv_cpu_sxl(env);
1383 int sxlen_bytes = sxlen / 8;
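/*
 * sxlen is the S-mode XLEN in bits (riscv_cpu_sxl() returns 1 for RV32 and
 * 2 for RV64); sxlen_bytes is used as the PTE access size for the PMP check
 * and the atomic PTE update below.
 */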
1384
1385 if (first_stage == true) {
1386 target_ulong mask, masked_msbs;
1387
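/*
 * Virtual addresses must be sign-extended from bit (va_bits - 1): all bits
 * above it must equal that bit, i.e. masked_msbs must be all zeros or all
 * ones.
 */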
1388 if (sxlen > (va_bits - 1)) {
1389 mask = (1L << (sxlen - (va_bits - 1))) - 1;
1390 } else {
1391 mask = 0;
1392 }
1393 masked_msbs = (addr >> (va_bits - 1)) & mask;
1394
1395 if (masked_msbs != 0 && masked_msbs != mask) {
1396 return TRANSLATE_FAIL;
1397 }
1398 } else {
1399 if (vm != VM_1_10_SV32 && addr >> va_bits != 0) {
1400 return TRANSLATE_FAIL;
1401 }
1402 }
1403
1404 bool pbmte = env->menvcfg & MENVCFG_PBMTE;
1405 bool svade = riscv_cpu_cfg(env)->ext_svade;
1406 bool svadu = riscv_cpu_cfg(env)->ext_svadu;
1407 bool adue = svadu ? env->menvcfg & MENVCFG_ADUE : !svade;
1408
1409 if (first_stage && two_stage && env->virt_enabled) {
1410 pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE);
1411 adue = adue && (env->henvcfg & HENVCFG_ADUE);
1412 }
1413
1414 int ptshift = (levels - 1) * ptidxbits;
1415 target_ulong pte;
1416 hwaddr pte_addr;
1417 int i;
1418
1419 restart:
1420 for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
1421 target_ulong idx;
1422 if (i == 0) {
1423 idx = (addr >> (PGSHIFT + ptshift)) &
1424 ((1 << (ptidxbits + widened)) - 1);
1425 } else {
1426 idx = (addr >> (PGSHIFT + ptshift)) &
1427 ((1 << ptidxbits) - 1);
1428 }
1429
1430 /* check that physical address of PTE is legal */
1431
1432 if (two_stage && first_stage) {
1433 int vbase_prot;
1434 hwaddr vbase;
1435
1436 /* Do the second stage translation on the base PTE address. */
1437 int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
1438 base, NULL, MMU_DATA_LOAD,
1439 MMUIdx_U, false, true,
1440 is_debug, false);
1441
1442 if (vbase_ret != TRANSLATE_SUCCESS) {
1443 if (fault_pte_addr) {
1444 *fault_pte_addr = (base + idx * ptesize) >> 2;
1445 }
1446 return TRANSLATE_G_STAGE_FAIL;
1447 }
1448
1449 pte_addr = vbase + idx * ptesize;
1450 } else {
1451 pte_addr = base + idx * ptesize;
1452 }
1453
1454 int pmp_prot;
1455 int pmp_ret = get_physical_address_pmp(env, &pmp_prot, pte_addr,
1456 sxlen_bytes,
1457 MMU_DATA_LOAD, PRV_S);
1458 if (pmp_ret != TRANSLATE_SUCCESS) {
1459 return TRANSLATE_PMP_FAIL;
1460 }
1461
1462 if (riscv_cpu_mxl(env) == MXL_RV32) {
1463 pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
1464 } else {
1465 pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
1466 }
1467
1468 if (res != MEMTX_OK) {
1469 return TRANSLATE_FAIL;
1470 }
1471
1472 if (riscv_cpu_sxl(env) == MXL_RV32) {
1473 ppn = pte >> PTE_PPN_SHIFT;
1474 } else {
1475 if (pte & PTE_RESERVED) {
1476 qemu_log_mask(LOG_GUEST_ERROR, "%s: reserved bits set in PTE: "
1477 "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1478 __func__, pte_addr, pte);
1479 return TRANSLATE_FAIL;
1480 }
1481
1482 if (!pbmte && (pte & PTE_PBMT)) {
1483 /* Reserved without Svpbmt. */
1484 qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
1485 "and Svpbmt extension is disabled: "
1486 "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1487 __func__, pte_addr, pte);
1488 return TRANSLATE_FAIL;
1489 }
1490
1491 if (!riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
1492 /* Reserved without Svnapot extension */
1493 qemu_log_mask(LOG_GUEST_ERROR, "%s: N bit set in PTE, "
1494 "and Svnapot extension is disabled: "
1495 "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1496 __func__, pte_addr, pte);
1497 return TRANSLATE_FAIL;
1498 }
1499
1500 ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
1501 }
1502
1503 if (!(pte & PTE_V)) {
1504 /* Invalid PTE */
1505 return TRANSLATE_FAIL;
1506 }
1507
1508 if (pte & (PTE_R | PTE_W | PTE_X)) {
1509 goto leaf;
1510 }
1511
1512 if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
1513 /* D, A, and U bits are reserved in non-leaf/inner PTEs */
1514 qemu_log_mask(LOG_GUEST_ERROR, "%s: D, A, or U bits set in non-leaf PTE: "
1515 "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1516 __func__, pte_addr, pte);
1517 return TRANSLATE_FAIL;
1518 }
1519 /* Inner PTE, continue walking */
1520 base = ppn << PGSHIFT;
1521 }
1522
1523 /* No leaf pte at any translation level. */
1524 return TRANSLATE_FAIL;
1525
1526 leaf:
1527 if (ppn & ((1ULL << ptshift) - 1)) {
1528 /* Misaligned PPN */
1529 qemu_log_mask(LOG_GUEST_ERROR, "%s: PPN bits in PTE is misaligned: "
1530 "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1531 __func__, pte_addr, pte);
1532 return TRANSLATE_FAIL;
1533 }
1534 if (!pbmte && (pte & PTE_PBMT)) {
1535 /* Reserved without Svpbmt. */
1536 qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
1537 "and Svpbmt extension is disabled: "
1538 "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1539 __func__, pte_addr, pte);
1540 return TRANSLATE_FAIL;
1541 }
1542
1543 target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X);
1544 /* Check for reserved combinations of RWX flags. */
1545 switch (rwx) {
1546 case PTE_W | PTE_X:
1547 return TRANSLATE_FAIL;
1548 case PTE_W:
1549 /* if bcfi is enabled, PTE_W alone is not reserved and marks a shadow stack page */
1550 if (cpu_get_bcfien(env) && first_stage) {
1551 sstack_page = true;
1552 /*
1553 * If this is a shadow stack access, read and write are allowed;
1554 * otherwise, if this is not a probe, only read is allowed.
1555 */
1556 rwx = is_sstack_idx ? (PTE_R | PTE_W) : (is_probe ? 0 : PTE_R);
1557 break;
1558 }
1559 return TRANSLATE_FAIL;
1560 case PTE_R:
1561 /*
1562 * No matter what the `access_type` is, shadow stack accesses to read-only
1563 * memory are always store page faults. During unwind, loads will be
1564 * promoted to store faults.
1565 */
1566 if (is_sstack_idx) {
1567 return TRANSLATE_FAIL;
1568 }
1569 break;
1570 }
1571
1572 int prot = 0;
1573 if (rwx & PTE_R) {
1574 prot |= PAGE_READ;
1575 }
1576 if (rwx & PTE_W) {
1577 prot |= PAGE_WRITE;
1578 }
1579 if (rwx & PTE_X) {
1580 bool mxr = false;
1581
1582 /*
1583 * Use mstatus for first stage or for the second stage without
1584 * virt_enabled (MPRV+MPV)
1585 */
1586 if (first_stage || !env->virt_enabled) {
1587 mxr = get_field(env->mstatus, MSTATUS_MXR);
1588 }
1589
1590 /* MPRV+MPV case, check VSSTATUS */
1591 if (first_stage && two_stage && !env->virt_enabled) {
1592 mxr |= get_field(env->vsstatus, MSTATUS_MXR);
1593 }
1594
1595 /*
1596 * Setting MXR at HS-level overrides both VS-stage and G-stage
1597 * execute-only permissions
1598 */
1599 if (env->virt_enabled) {
1600 mxr |= get_field(env->mstatus_hs, MSTATUS_MXR);
1601 }
1602
1603 if (mxr) {
1604 prot |= PAGE_READ;
1605 }
1606 prot |= PAGE_EXEC;
1607 }
1608
1609 if (pte & PTE_U) {
1610 if (mode != PRV_U) {
1611 if (!mmuidx_sum(mmu_idx)) {
1612 return TRANSLATE_FAIL;
1613 }
1614 /* SUM allows only read+write, not execute. */
1615 prot &= PAGE_READ | PAGE_WRITE;
1616 }
1617 } else if (mode != PRV_S) {
1618 /* Supervisor PTE flags when not S mode */
1619 return TRANSLATE_FAIL;
1620 }
1621
1622 if (!((prot >> access_type) & 1)) {
1623 /*
1624 * Access check failed. Access check failures for shadow stack pages
1625 * are access faults.
1626 */
1627 return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;
1628 }
1629
1630 target_ulong updated_pte = pte;
1631
1632 /*
1633 * If ADUE is enabled, set accessed and dirty bits.
1634 * Otherwise raise an exception if necessary.
1635 */
1636 if (adue) {
1637 updated_pte |= PTE_A | (access_type == MMU_DATA_STORE ? PTE_D : 0);
1638 } else if (!(pte & PTE_A) ||
1639 (access_type == MMU_DATA_STORE && !(pte & PTE_D))) {
1640 return TRANSLATE_FAIL;
1641 }
1642
1643 /* Page table updates need to be atomic with MTTCG enabled */
1644 if (updated_pte != pte && !is_debug) {
1645 if (!adue) {
1646 return TRANSLATE_FAIL;
1647 }
1648
1649 /*
1650 * - if accessed or dirty bits need updating, and the PTE is
1651 * in RAM, then we do so atomically with a compare and swap.
1652 * - if the PTE is in IO space or ROM, then it can't be updated
1653 * and we return TRANSLATE_FAIL.
1654 * - if the PTE changed by the time we went to update it, then
1655 * it is no longer valid and we must re-walk the page table.
1656 */
1657 MemoryRegion *mr;
1658 hwaddr l = sxlen_bytes, addr1;
1659 mr = address_space_translate(cs->as, pte_addr, &addr1, &l,
1660 false, MEMTXATTRS_UNSPECIFIED);
1661 if (memory_region_is_ram(mr)) {
1662 target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1);
1663 target_ulong old_pte;
1664 if (riscv_cpu_sxl(env) == MXL_RV32) {
1665 old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, cpu_to_le32(pte), cpu_to_le32(updated_pte));
1666 old_pte = le32_to_cpu(old_pte);
1667 } else {
1668 old_pte = qatomic_cmpxchg(pte_pa, cpu_to_le64(pte), cpu_to_le64(updated_pte));
1669 old_pte = le64_to_cpu(old_pte);
1670 }
1671 if (old_pte != pte) {
1672 goto restart;
1673 }
1674 pte = updated_pte;
1675 } else {
1676 /*
1677 * Misconfigured PTE in ROM (AD bits are not preset) or
1678 * PTE is in IO space and can't be updated atomically.
1679 */
1680 return TRANSLATE_FAIL;
1681 }
1682 }
1683
1684 /* For superpage mappings, make a fake leaf PTE for the TLB's benefit. */
1685 target_ulong vpn = addr >> PGSHIFT;
1686
1687 if (riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
1688 napot_bits = ctzl(ppn) + 1;
1689 if ((i != (levels - 1)) || (napot_bits != 4)) {
1690 return TRANSLATE_FAIL;
1691 }
1692 }
1693
1694 napot_mask = (1 << napot_bits) - 1;
1695 *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
1696 (vpn & (((target_ulong)1 << ptshift) - 1))
1697 ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);
1698
1699 /*
1700 * Remove write permission unless this is a store, or the page is
1701 * already dirty, so that we TLB miss on later writes to update
1702 * the dirty bit.
1703 */
1704 if (access_type != MMU_DATA_STORE && !(pte & PTE_D)) {
1705 prot &= ~PAGE_WRITE;
1706 }
1707 *ret_prot = prot;
1708
1709 return TRANSLATE_SUCCESS;
1710 }
1711
1712 static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
1713 MMUAccessType access_type, bool pmp_violation,
1714 bool first_stage, bool two_stage,
1715 bool two_stage_indirect)
1716 {
1717 CPUState *cs = env_cpu(env);
1718
1719 switch (access_type) {
1720 case MMU_INST_FETCH:
1721 if (pmp_violation) {
1722 cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
1723 } else if (env->virt_enabled && !first_stage) {
1724 cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
1725 } else {
1726 cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
1727 }
1728 break;
1729 case MMU_DATA_LOAD:
1730 if (pmp_violation) {
1731 cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1732 } else if (two_stage && !first_stage) {
1733 cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
1734 } else {
1735 cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
1736 }
1737 break;
1738 case MMU_DATA_STORE:
1739 if (pmp_violation) {
1740 cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1741 } else if (two_stage && !first_stage) {
1742 cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
1743 } else {
1744 cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
1745 }
1746 break;
1747 default:
1748 g_assert_not_reached();
1749 }
1750 env->badaddr = address;
1751 env->two_stage_lookup = two_stage;
1752 env->two_stage_indirect_lookup = two_stage_indirect;
1753 }
1754
1755 hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
1756 {
1757 RISCVCPU *cpu = RISCV_CPU(cs);
1758 CPURISCVState *env = &cpu->env;
1759 hwaddr phys_addr;
1760 int prot;
1761 int mmu_idx = riscv_env_mmu_index(&cpu->env, false);
1762
1763 if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
1764 true, env->virt_enabled, true, false)) {
1765 return -1;
1766 }
1767
1768 if (env->virt_enabled) {
1769 if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
1770 0, MMUIdx_U, false, true, true, false)) {
1771 return -1;
1772 }
1773 }
1774
1775 return phys_addr & TARGET_PAGE_MASK;
1776 }
1777
1778 void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
1779 vaddr addr, unsigned size,
1780 MMUAccessType access_type,
1781 int mmu_idx, MemTxAttrs attrs,
1782 MemTxResult response, uintptr_t retaddr)
1783 {
1784 RISCVCPU *cpu = RISCV_CPU(cs);
1785 CPURISCVState *env = &cpu->env;
1786
1787 if (access_type == MMU_DATA_STORE) {
1788 cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1789 } else if (access_type == MMU_DATA_LOAD) {
1790 cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1791 } else {
1792 cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
1793 }
1794
1795 env->badaddr = addr;
1796 env->two_stage_lookup = mmuidx_2stage(mmu_idx);
1797 env->two_stage_indirect_lookup = false;
1798 cpu_loop_exit_restore(cs, retaddr);
1799 }
1800
1801 void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
1802 MMUAccessType access_type, int mmu_idx,
1803 uintptr_t retaddr)
1804 {
1805 RISCVCPU *cpu = RISCV_CPU(cs);
1806 CPURISCVState *env = &cpu->env;
1807 switch (access_type) {
1808 case MMU_INST_FETCH:
1809 cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
1810 break;
1811 case MMU_DATA_LOAD:
1812 cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
1813 /* shadow stack misaligned accesses are access faults */
1814 if (mmu_idx & MMU_IDX_SS_WRITE) {
1815 cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1816 }
1817 break;
1818 case MMU_DATA_STORE:
1819 cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
1820 /* shadow stack misaligned accesses are access faults */
1821 if (mmu_idx & MMU_IDX_SS_WRITE) {
1822 cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1823 }
1824 break;
1825 default:
1826 g_assert_not_reached();
1827 }
1828 env->badaddr = addr;
1829 env->two_stage_lookup = mmuidx_2stage(mmu_idx);
1830 env->two_stage_indirect_lookup = false;
1831 cpu_loop_exit_restore(cs, retaddr);
1832 }
1833
1834
1835 static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type)
1836 {
1837 enum riscv_pmu_event_idx pmu_event_type;
1838
1839 switch (access_type) {
1840 case MMU_INST_FETCH:
1841 pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
1842 break;
1843 case MMU_DATA_LOAD:
1844 pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS;
1845 break;
1846 case MMU_DATA_STORE:
1847 pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
1848 break;
1849 default:
1850 return;
1851 }
1852
1853 riscv_pmu_incr_ctr(cpu, pmu_event_type);
1854 }
1855
1856 bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
1857 MMUAccessType access_type, int mmu_idx,
1858 bool probe, uintptr_t retaddr)
1859 {
1860 RISCVCPU *cpu = RISCV_CPU(cs);
1861 CPURISCVState *env = &cpu->env;
1862 vaddr im_address;
1863 hwaddr pa = 0;
1864 int prot, prot2, prot_pmp;
1865 bool pmp_violation = false;
1866 bool first_stage_error = true;
1867 bool two_stage_lookup = mmuidx_2stage(mmu_idx);
1868 bool two_stage_indirect_error = false;
1869 int ret = TRANSLATE_FAIL;
1870 int mode = mmuidx_priv(mmu_idx);
1871 /* default TLB page size */
1872 hwaddr tlb_size = TARGET_PAGE_SIZE;
1873
1874 env->guest_phys_fault_addr = 0;
1875
1876 qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
1877 __func__, address, access_type, mmu_idx);
1878
1879 pmu_tlb_fill_incr_ctr(cpu, access_type);
1880 if (two_stage_lookup) {
1881 /* Two stage lookup */
1882 ret = get_physical_address(env, &pa, &prot, address,
1883 &env->guest_phys_fault_addr, access_type,
1884 mmu_idx, true, true, false, probe);
1885
1886 /*
1887          * A G-stage exception may be triggered during the two-stage lookup;
1888          * in that case env->guest_phys_fault_addr has already been set by
1889          * get_physical_address().
1890 */
1891 if (ret == TRANSLATE_G_STAGE_FAIL) {
1892 first_stage_error = false;
1893 two_stage_indirect_error = true;
1894 }
1895
1896 qemu_log_mask(CPU_LOG_MMU,
1897 "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
1898 HWADDR_FMT_plx " prot %d\n",
1899 __func__, address, ret, pa, prot);
1900
1901 if (ret == TRANSLATE_SUCCESS) {
1902 /* Second stage lookup */
1903 im_address = pa;
1904
1905 ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
1906 access_type, MMUIdx_U, false, true,
1907 false, probe);
1908
1909 qemu_log_mask(CPU_LOG_MMU,
1910 "%s 2nd-stage address=%" VADDR_PRIx
1911 " ret %d physical "
1912 HWADDR_FMT_plx " prot %d\n",
1913 __func__, im_address, ret, pa, prot2);
1914
1915 prot &= prot2;
1916
1917 if (ret == TRANSLATE_SUCCESS) {
1918 ret = get_physical_address_pmp(env, &prot_pmp, pa,
1919 size, access_type, mode);
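                /*
                 * If the matching PMP region does not cover the whole page,
                 * shrink the TLB entry so that further accesses to this page
                 * are re-checked against PMP instead of hitting a cached
                 * full-page translation.
                 */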
1920 tlb_size = pmp_get_tlb_size(env, pa);
1921
1922 qemu_log_mask(CPU_LOG_MMU,
1923 "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
1924 " %d tlb_size %" HWADDR_PRIu "\n",
1925 __func__, pa, ret, prot_pmp, tlb_size);
1926
1927 prot &= prot_pmp;
1928 } else {
1929 /*
1930 * Guest physical address translation failed, this is a HS
1931 * level exception
1932 */
1933 first_stage_error = false;
1934 if (ret != TRANSLATE_PMP_FAIL) {
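                    /*
                     * htval/mtval2 report the faulting guest physical address
                     * shifted right by 2 bits, as defined by the hypervisor
                     * extension, hence the >> 2 below.
                     */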
1935 env->guest_phys_fault_addr = (im_address |
1936 (address &
1937 (TARGET_PAGE_SIZE - 1))) >> 2;
1938 }
1939 }
1940 }
1941 } else {
1942 /* Single stage lookup */
1943 ret = get_physical_address(env, &pa, &prot, address, NULL,
1944 access_type, mmu_idx, true, false, false,
1945 probe);
1946
1947 qemu_log_mask(CPU_LOG_MMU,
1948 "%s address=%" VADDR_PRIx " ret %d physical "
1949 HWADDR_FMT_plx " prot %d\n",
1950 __func__, address, ret, pa, prot);
1951
1952 if (ret == TRANSLATE_SUCCESS) {
1953 ret = get_physical_address_pmp(env, &prot_pmp, pa,
1954 size, access_type, mode);
1955 tlb_size = pmp_get_tlb_size(env, pa);
1956
1957 qemu_log_mask(CPU_LOG_MMU,
1958 "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
1959 " %d tlb_size %" HWADDR_PRIu "\n",
1960 __func__, pa, ret, prot_pmp, tlb_size);
1961
1962 prot &= prot_pmp;
1963 }
1964 }
1965
1966 if (ret == TRANSLATE_PMP_FAIL) {
1967 pmp_violation = true;
1968 }
1969
1970 if (ret == TRANSLATE_SUCCESS) {
1971 tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
1972 prot, mmu_idx, tlb_size);
1973 return true;
1974 } else if (probe) {
1975 return false;
1976 } else {
1977 int wp_access = 0;
1978
1979 if (access_type == MMU_DATA_LOAD) {
1980 wp_access |= BP_MEM_READ;
1981 } else if (access_type == MMU_DATA_STORE) {
1982 wp_access |= BP_MEM_WRITE;
1983 }
1984
1985 /*
1986          * If a watchpoint isn't found for 'address' this will
1987 * be a no-op and we'll resume the mmu_exception path.
1988 * Otherwise we'll throw a debug exception and execution
1989 * will continue elsewhere.
1990 */
1991 cpu_check_watchpoint(cs, address, size, MEMTXATTRS_UNSPECIFIED,
1992 wp_access, retaddr);
1993
1994 raise_mmu_exception(env, address, access_type, pmp_violation,
1995 first_stage_error, two_stage_lookup,
1996 two_stage_indirect_error);
1997 cpu_loop_exit_restore(cs, retaddr);
1998 }
1999
2000 return true;
2001 }
2002
2003 static target_ulong riscv_transformed_insn(CPURISCVState *env,
2004 target_ulong insn,
2005 target_ulong taddr)
2006 {
2007 target_ulong xinsn = 0;
2008 target_ulong access_rs1 = 0, access_imm = 0, access_size = 0;
2009
2010 /*
2011      * Only Quadrant 0 and Quadrant 2 of the RVC instruction space need to
2012      * be uncompressed. Quadrant 1 need not be transformed because its
2013      * instructions never generate a load/store trap.
2015 */
2016
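    /*
     * Compressed (16-bit) encodings have insn[1:0] != 0b11; all 32-bit
     * encodings have the two low opcode bits set.
     */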
2017 if ((insn & 0x3) != 0x3) {
2018 /* Transform 16bit instruction into 32bit instruction */
2019 switch (GET_C_OP(insn)) {
2020 case OPC_RISC_C_OP_QUAD0: /* Quadrant 0 */
2021 switch (GET_C_FUNC(insn)) {
2022 case OPC_RISC_C_FUNC_FLD_LQ:
2023 if (riscv_cpu_xlen(env) != 128) { /* C.FLD (RV32/64) */
2024 xinsn = OPC_RISC_FLD;
2025 xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
2026 access_rs1 = GET_C_RS1S(insn);
2027 access_imm = GET_C_LD_IMM(insn);
2028 access_size = 8;
2029 }
2030 break;
2031 case OPC_RISC_C_FUNC_LW: /* C.LW */
2032 xinsn = OPC_RISC_LW;
2033 xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
2034 access_rs1 = GET_C_RS1S(insn);
2035 access_imm = GET_C_LW_IMM(insn);
2036 access_size = 4;
2037 break;
2038 case OPC_RISC_C_FUNC_FLW_LD:
2039 if (riscv_cpu_xlen(env) == 32) { /* C.FLW (RV32) */
2040 xinsn = OPC_RISC_FLW;
2041 xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
2042 access_rs1 = GET_C_RS1S(insn);
2043 access_imm = GET_C_LW_IMM(insn);
2044 access_size = 4;
2045 } else { /* C.LD (RV64/RV128) */
2046 xinsn = OPC_RISC_LD;
2047 xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
2048 access_rs1 = GET_C_RS1S(insn);
2049 access_imm = GET_C_LD_IMM(insn);
2050 access_size = 8;
2051 }
2052 break;
2053 case OPC_RISC_C_FUNC_FSD_SQ:
2054 if (riscv_cpu_xlen(env) != 128) { /* C.FSD (RV32/64) */
2055 xinsn = OPC_RISC_FSD;
2056 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
2057 access_rs1 = GET_C_RS1S(insn);
2058 access_imm = GET_C_SD_IMM(insn);
2059 access_size = 8;
2060 }
2061 break;
2062 case OPC_RISC_C_FUNC_SW: /* C.SW */
2063 xinsn = OPC_RISC_SW;
2064 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
2065 access_rs1 = GET_C_RS1S(insn);
2066 access_imm = GET_C_SW_IMM(insn);
2067 access_size = 4;
2068 break;
2069 case OPC_RISC_C_FUNC_FSW_SD:
2070 if (riscv_cpu_xlen(env) == 32) { /* C.FSW (RV32) */
2071 xinsn = OPC_RISC_FSW;
2072 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
2073 access_rs1 = GET_C_RS1S(insn);
2074 access_imm = GET_C_SW_IMM(insn);
2075 access_size = 4;
2076 } else { /* C.SD (RV64/RV128) */
2077 xinsn = OPC_RISC_SD;
2078 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
2079 access_rs1 = GET_C_RS1S(insn);
2080 access_imm = GET_C_SD_IMM(insn);
2081 access_size = 8;
2082 }
2083 break;
2084 default:
2085 break;
2086 }
2087 break;
2088 case OPC_RISC_C_OP_QUAD2: /* Quadrant 2 */
2089 switch (GET_C_FUNC(insn)) {
2090 case OPC_RISC_C_FUNC_FLDSP_LQSP:
2091 if (riscv_cpu_xlen(env) != 128) { /* C.FLDSP (RV32/64) */
2092 xinsn = OPC_RISC_FLD;
2093 xinsn = SET_RD(xinsn, GET_C_RD(insn));
2094 access_rs1 = 2;
2095 access_imm = GET_C_LDSP_IMM(insn);
2096 access_size = 8;
2097 }
2098 break;
2099 case OPC_RISC_C_FUNC_LWSP: /* C.LWSP */
2100 xinsn = OPC_RISC_LW;
2101 xinsn = SET_RD(xinsn, GET_C_RD(insn));
2102 access_rs1 = 2;
2103 access_imm = GET_C_LWSP_IMM(insn);
2104 access_size = 4;
2105 break;
2106 case OPC_RISC_C_FUNC_FLWSP_LDSP:
2107 if (riscv_cpu_xlen(env) == 32) { /* C.FLWSP (RV32) */
2108 xinsn = OPC_RISC_FLW;
2109 xinsn = SET_RD(xinsn, GET_C_RD(insn));
2110 access_rs1 = 2;
2111 access_imm = GET_C_LWSP_IMM(insn);
2112 access_size = 4;
2113 } else { /* C.LDSP (RV64/RV128) */
2114 xinsn = OPC_RISC_LD;
2115 xinsn = SET_RD(xinsn, GET_C_RD(insn));
2116 access_rs1 = 2;
2117 access_imm = GET_C_LDSP_IMM(insn);
2118 access_size = 8;
2119 }
2120 break;
2121 case OPC_RISC_C_FUNC_FSDSP_SQSP:
2122 if (riscv_cpu_xlen(env) != 128) { /* C.FSDSP (RV32/64) */
2123 xinsn = OPC_RISC_FSD;
2124 xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2125 access_rs1 = 2;
2126 access_imm = GET_C_SDSP_IMM(insn);
2127 access_size = 8;
2128 }
2129 break;
2130 case OPC_RISC_C_FUNC_SWSP: /* C.SWSP */
2131 xinsn = OPC_RISC_SW;
2132 xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2133 access_rs1 = 2;
2134 access_imm = GET_C_SWSP_IMM(insn);
2135 access_size = 4;
2136 break;
2137         case 7: /* C.FSWSP / C.SDSP */
2138 if (riscv_cpu_xlen(env) == 32) { /* C.FSWSP (RV32) */
2139 xinsn = OPC_RISC_FSW;
2140 xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2141 access_rs1 = 2;
2142 access_imm = GET_C_SWSP_IMM(insn);
2143 access_size = 4;
2144 } else { /* C.SDSP (RV64/RV128) */
2145 xinsn = OPC_RISC_SD;
2146 xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2147 access_rs1 = 2;
2148 access_imm = GET_C_SDSP_IMM(insn);
2149 access_size = 8;
2150 }
2151 break;
2152 default:
2153 break;
2154 }
2155 break;
2156 default:
2157 break;
2158 }
2159
2160 /*
2161          * Clear bit 1 of the transformed instruction to indicate that the
2162          * original instruction was a 16-bit instruction.
2163 */
2164 xinsn &= ~((target_ulong)0x2);
2165 } else {
2166 /* Transform 32bit (or wider) instructions */
2167 switch (MASK_OP_MAJOR(insn)) {
2168 case OPC_RISC_ATOMIC:
2169 xinsn = insn;
2170 access_rs1 = GET_RS1(insn);
2171 access_size = 1 << GET_FUNCT3(insn);
2172 break;
2173 case OPC_RISC_LOAD:
2174 case OPC_RISC_FP_LOAD:
2175 xinsn = SET_I_IMM(insn, 0);
2176 access_rs1 = GET_RS1(insn);
2177 access_imm = GET_IMM(insn);
2178 access_size = 1 << GET_FUNCT3(insn);
2179 break;
2180 case OPC_RISC_STORE:
2181 case OPC_RISC_FP_STORE:
2182 xinsn = SET_S_IMM(insn, 0);
2183 access_rs1 = GET_RS1(insn);
2184 access_imm = GET_STORE_IMM(insn);
2185 access_size = 1 << GET_FUNCT3(insn);
2186 break;
2187 case OPC_RISC_SYSTEM:
2188 if (MASK_OP_SYSTEM(insn) == OPC_RISC_HLVHSV) {
2189 xinsn = insn;
2190 access_rs1 = GET_RS1(insn);
2191                 access_size = 1 << ((GET_FUNCT7(insn) >> 1) & 0x3);
2193 }
2194 break;
2195 default:
2196 break;
2197 }
2198 }
2199
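    /*
     * Fill the "Addr. Offset" (rs1) field of the transformed instruction
     * with the offset of the trapping address from the address the
     * instruction originally computed (rs1 + imm); it is non-zero only
     * for misaligned accesses.
     */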
2200 if (access_size) {
2201 xinsn = SET_RS1(xinsn, (taddr - (env->gpr[access_rs1] + access_imm)) &
2202 (access_size - 1));
2203 }
2204
2205 return xinsn;
2206 }
2207
2208 static target_ulong promote_load_fault(target_ulong orig_cause)
2209 {
2210 switch (orig_cause) {
2211 case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
2212 return RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
2213
2214 case RISCV_EXCP_LOAD_ACCESS_FAULT:
2215 return RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
2216
2217 case RISCV_EXCP_LOAD_PAGE_FAULT:
2218 return RISCV_EXCP_STORE_PAGE_FAULT;
2219 }
2220
2221 /* if no promotion, return original cause */
2222 return orig_cause;
2223 }
2224
2225 static void riscv_do_nmi(CPURISCVState *env, target_ulong cause, bool virt)
2226 {
2227 env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
2228 env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPV, virt);
2229 env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPP, env->priv);
2230 env->mncause = cause;
2231 env->mnepc = env->pc;
2232 env->pc = env->rnmi_irqvec;
2233
2234 if (cpu_get_fcfien(env)) {
2235 env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, env->elp);
2236 }
2237
2238 /* Trapping to M mode, virt is disabled */
2239 riscv_cpu_set_mode(env, PRV_M, false);
2240 }
2241
2242 /*
2243 * Handle Traps
2244 *
2245 * Adapted from Spike's processor_t::take_trap.
2246 *
2247 */
2248 void riscv_cpu_do_interrupt(CPUState *cs)
2249 {
2250 RISCVCPU *cpu = RISCV_CPU(cs);
2251 CPURISCVState *env = &cpu->env;
2252 bool virt = env->virt_enabled;
2253 bool write_gva = false;
2254 bool always_storeamo = (env->excp_uw2 & RISCV_UW2_ALWAYS_STORE_AMO);
2255 bool vsmode_exc;
2256 uint64_t s;
2257 int mode;
2258
2259 /*
2260      * cs->exception_index is 32 bits wide, unlike mcause which is XLEN bits
2261      * wide, so we mask off the MSB and separate into trap type and cause.
2262 */
2263 bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
2264 target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
2265 uint64_t deleg = async ? env->mideleg : env->medeleg;
2266 bool s_injected = env->mvip & (1ULL << cause) & env->mvien &&
2267 !(env->mip & (1ULL << cause));
2268 bool vs_injected = env->hvip & (1ULL << cause) & env->hvien &&
2269 !(env->mip & (1ULL << cause));
2270 bool smode_double_trap = false;
2271 uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
2272 const bool prev_virt = env->virt_enabled;
2273 const target_ulong prev_priv = env->priv;
2274 target_ulong tval = 0;
2275 target_ulong tinst = 0;
2276 target_ulong htval = 0;
2277 target_ulong mtval2 = 0;
2278 target_ulong src;
2279 int sxlen = 0;
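    /* MXL is encoded as 1/2/3 for RV32/RV64/RV128, so 16 << mxl is XLEN in bits */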
2280 int mxlen = 16 << riscv_cpu_mxl(env);
2281 bool nnmi_excep = false;
2282
2283 if (cpu->cfg.ext_smrnmi && env->rnmip && async) {
2284 riscv_do_nmi(env, cause | ((target_ulong)1U << (mxlen - 1)),
2285 env->virt_enabled);
2286 return;
2287 }
2288
2289 if (!async) {
2290 /* set tval to badaddr for traps with address information */
2291 switch (cause) {
2292 #ifdef CONFIG_TCG
2293 case RISCV_EXCP_SEMIHOST:
2294 do_common_semihosting(cs);
2295 env->pc += 4;
2296 return;
2297 #endif
2298 case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
2299 case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
2300 case RISCV_EXCP_LOAD_ADDR_MIS:
2301 case RISCV_EXCP_STORE_AMO_ADDR_MIS:
2302 case RISCV_EXCP_LOAD_ACCESS_FAULT:
2303 case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
2304 case RISCV_EXCP_LOAD_PAGE_FAULT:
2305 case RISCV_EXCP_STORE_PAGE_FAULT:
2306 if (always_storeamo) {
2307 cause = promote_load_fault(cause);
2308 }
2309 write_gva = env->two_stage_lookup;
2310 tval = env->badaddr;
2311 if (env->two_stage_indirect_lookup) {
2312 /*
2313 * special pseudoinstruction for G-stage fault taken while
2314 * doing VS-stage page table walk.
2315 */
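                /*
                 * 0x00002000 / 0x00003000 are the standard pseudoinstruction
                 * encodings defined by the hypervisor extension for an
                 * implicit 32-bit / 64-bit memory access performed during
                 * VS-stage address translation.
                 */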
2316 tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
2317 } else {
2318 /*
2319                  * The "Addr. Offset" field in the transformed instruction is
2320                  * non-zero only for misaligned accesses.
2321 */
2322 tinst = riscv_transformed_insn(env, env->bins, tval);
2323 }
2324 break;
2325 case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
2326 case RISCV_EXCP_INST_ADDR_MIS:
2327 case RISCV_EXCP_INST_ACCESS_FAULT:
2328 case RISCV_EXCP_INST_PAGE_FAULT:
2329 write_gva = env->two_stage_lookup;
2330 tval = env->badaddr;
2331 if (env->two_stage_indirect_lookup) {
2332 /*
2333 * special pseudoinstruction for G-stage fault taken while
2334 * doing VS-stage page table walk.
2335 */
2336 tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
2337 }
2338 break;
2339 case RISCV_EXCP_ILLEGAL_INST:
2340 case RISCV_EXCP_VIRT_INSTRUCTION_FAULT:
2341 tval = env->bins;
2342 break;
2343 case RISCV_EXCP_BREAKPOINT:
2344 tval = env->badaddr;
2345 if (cs->watchpoint_hit) {
2346 tval = cs->watchpoint_hit->hitaddr;
2347 cs->watchpoint_hit = NULL;
2348 }
2349 break;
2350 case RISCV_EXCP_SW_CHECK:
2351 tval = env->sw_check_code;
2352 break;
2353 default:
2354 break;
2355 }
2356 /* ecall is dispatched as one cause so translate based on mode */
2357 if (cause == RISCV_EXCP_U_ECALL) {
2358 assert(env->priv <= 3);
2359
2360 if (env->priv == PRV_M) {
2361 cause = RISCV_EXCP_M_ECALL;
2362 } else if (env->priv == PRV_S && env->virt_enabled) {
2363 cause = RISCV_EXCP_VS_ECALL;
2364 } else if (env->priv == PRV_S && !env->virt_enabled) {
2365 cause = RISCV_EXCP_S_ECALL;
2366 } else if (env->priv == PRV_U) {
2367 cause = RISCV_EXCP_U_ECALL;
2368 }
2369 }
2370 }
2371
2372 trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
2373 riscv_cpu_get_trap_name(cause, async));
2374
2375 qemu_log_mask(CPU_LOG_INT,
2376 "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
2377 "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
2378 __func__, env->mhartid, async, cause, env->pc, tval,
2379 riscv_cpu_get_trap_name(cause, async));
2380
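    /*
     * Traps with a cause below 64 that are delegated via medeleg/mideleg
     * (or injected via mvien/hvien) are handled in S-mode when taken from
     * S- or U-mode; everything else is handled in M-mode.
     */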
2381 mode = env->priv <= PRV_S && cause < 64 &&
2382 (((deleg >> cause) & 1) || s_injected || vs_injected) ? PRV_S : PRV_M;
2383
2384 vsmode_exc = env->virt_enabled && cause < 64 &&
2385 (((hdeleg >> cause) & 1) || vs_injected);
2386
2387 /*
2388 * Check double trap condition only if already in S-mode and targeting
2389 * S-mode
2390 */
2391 if (cpu->cfg.ext_ssdbltrp && env->priv == PRV_S && mode == PRV_S) {
2392 bool dte = (env->menvcfg & MENVCFG_DTE) != 0;
2393 bool sdt = (env->mstatus & MSTATUS_SDT) != 0;
2394 /* In VS or HS */
2395 if (riscv_has_ext(env, RVH)) {
2396 if (vsmode_exc) {
2397                 /* VS -> VS, use henvcfg instead of menvcfg */
2398 dte = (env->henvcfg & HENVCFG_DTE) != 0;
2399 } else if (env->virt_enabled) {
2400 /* VS -> HS, use mstatus_hs */
2401 sdt = (env->mstatus_hs & MSTATUS_SDT) != 0;
2402 }
2403 }
2404 smode_double_trap = dte && sdt;
2405 if (smode_double_trap) {
2406 mode = PRV_M;
2407 }
2408 }
2409
2410 if (mode == PRV_S) {
2411 /* handle the trap in S-mode */
2412 /* save elp status */
2413 if (cpu_get_fcfien(env)) {
2414 env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, env->elp);
2415 }
2416
2417 if (riscv_has_ext(env, RVH)) {
2418 if (vsmode_exc) {
2419 /* Trap to VS mode */
2420 /*
2421                  * See if we need to adjust the cause: yes if it is a VS-mode
2422                  * interrupt, no if the hypervisor has delegated one of HS
2423                  * mode's interrupts.
2423 */
2424 if (async && (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
2425 cause == IRQ_VS_EXT)) {
2426 cause = cause - 1;
2427 }
2428 write_gva = false;
2429 } else if (env->virt_enabled) {
2430 /* Trap into HS mode, from virt */
2431 riscv_cpu_swap_hypervisor_regs(env);
2432 env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
2433 env->priv);
2434 env->hstatus = set_field(env->hstatus, HSTATUS_SPV, true);
2435
2436 htval = env->guest_phys_fault_addr;
2437
2438 virt = false;
2439 } else {
2440 /* Trap into HS mode */
2441 env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
2442 htval = env->guest_phys_fault_addr;
2443 }
2444 env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
2445 }
2446
2447 s = env->mstatus;
2448 s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
2449 s = set_field(s, MSTATUS_SPP, env->priv);
2450 s = set_field(s, MSTATUS_SIE, 0);
2451 if (riscv_env_smode_dbltrp_enabled(env, virt)) {
2452 s = set_field(s, MSTATUS_SDT, 1);
2453 }
2454 env->mstatus = s;
2455 sxlen = 16 << riscv_cpu_sxl(env);
2456 env->scause = cause | ((target_ulong)async << (sxlen - 1));
2457 env->sepc = env->pc;
2458 env->stval = tval;
2459 env->htval = htval;
2460 env->htinst = tinst;
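        /*
         * stvec MODE field (low 2 bits): 0 = direct, 1 = vectored. In
         * vectored mode an asynchronous trap jumps to BASE + 4 * cause.
         */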
2461 env->pc = (env->stvec >> 2 << 2) +
2462 ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
2463 riscv_cpu_set_mode(env, PRV_S, virt);
2464
2465 src = env->sepc;
2466 } else {
2467 /*
2468 * If the hart encounters an exception while executing in M-mode
2469 * with the mnstatus.NMIE bit clear, the exception is an RNMI exception.
2470 */
2471 nnmi_excep = cpu->cfg.ext_smrnmi &&
2472 !get_field(env->mnstatus, MNSTATUS_NMIE) &&
2473 !async;
2474
2475 /* handle the trap in M-mode */
2476 /* save elp status */
2477 if (cpu_get_fcfien(env)) {
2478 if (nnmi_excep) {
2479 env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP,
2480 env->elp);
2481 } else {
2482 env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, env->elp);
2483 }
2484 }
2485
2486 if (riscv_has_ext(env, RVH)) {
2487 if (env->virt_enabled) {
2488 riscv_cpu_swap_hypervisor_regs(env);
2489 }
2490 env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
2491 env->virt_enabled);
2492 if (env->virt_enabled && tval) {
2493 env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
2494 }
2495
2496 mtval2 = env->guest_phys_fault_addr;
2497
2498 /* Trapping to M mode, virt is disabled */
2499 virt = false;
2500 }
2501 /*
2502 * If the hart encounters an exception while executing in M-mode,
2503 * with the mnstatus.NMIE bit clear, the program counter is set to
2504 * the RNMI exception trap handler address.
2505 */
2506 nnmi_excep = cpu->cfg.ext_smrnmi &&
2507 !get_field(env->mnstatus, MNSTATUS_NMIE) &&
2508 !async;
2509
2510 s = env->mstatus;
2511 s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
2512 s = set_field(s, MSTATUS_MPP, env->priv);
2513 s = set_field(s, MSTATUS_MIE, 0);
2514 if (cpu->cfg.ext_smdbltrp) {
2515 if (env->mstatus & MSTATUS_MDT) {
2516 assert(env->priv == PRV_M);
2517 if (!cpu->cfg.ext_smrnmi || nnmi_excep) {
2518 cpu_abort(CPU(cpu), "M-mode double trap\n");
2519 } else {
2520 riscv_do_nmi(env, cause, false);
2521 return;
2522 }
2523 }
2524
2525 s = set_field(s, MSTATUS_MDT, 1);
2526 }
2527 env->mstatus = s;
2528 env->mcause = cause | ((target_ulong)async << (mxlen - 1));
2529 if (smode_double_trap) {
2530 env->mtval2 = env->mcause;
2531 env->mcause = RISCV_EXCP_DOUBLE_TRAP;
2532 } else {
2533 env->mtval2 = mtval2;
2534 }
2535 env->mepc = env->pc;
2536 env->mtval = tval;
2537 env->mtinst = tinst;
2538
2539 /*
2540 * For RNMI exception, program counter is set to the RNMI exception
2541 * trap handler address.
2542 */
2543 if (nnmi_excep) {
2544 env->pc = env->rnmi_excpvec;
2545 } else {
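            /* Vectored mtvec (MODE == 1): interrupts jump to BASE + 4 * cause */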
2546 env->pc = (env->mtvec >> 2 << 2) +
2547 ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
2548 }
2549 riscv_cpu_set_mode(env, PRV_M, virt);
2550 src = env->mepc;
2551 }
2552
2553 if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
2554 if (async && cause == IRQ_PMU_OVF) {
2555 riscv_ctr_freeze(env, XCTRCTL_LCOFIFRZ, virt);
2556 } else if (!async && cause == RISCV_EXCP_BREAKPOINT) {
2557 riscv_ctr_freeze(env, XCTRCTL_BPFRZ, virt);
2558 }
2559
2560 riscv_ctr_add_entry(env, src, env->pc,
2561 async ? CTRDATA_TYPE_INTERRUPT : CTRDATA_TYPE_EXCEPTION,
2562 prev_priv, prev_virt);
2563 }
2564
2565 /*
2566      * Interrupt/exception/trap delivery is an asynchronous event, and per
2567      * the Zicfilp spec the CPU should clear the ELP state. There is no harm
2568      * in clearing it unconditionally.
2569 */
2570 env->elp = false;
2571
2572 /*
2573 * NOTE: it is not necessary to yield load reservations here. It is only
2574 * necessary for an SC from "another hart" to cause a load reservation
2575 * to be yielded. Refer to the memory consistency model section of the
2576 * RISC-V ISA Specification.
2577 */
2578
2579 env->two_stage_lookup = false;
2580 env->two_stage_indirect_lookup = false;
2581 }
2582
2583 #endif /* !CONFIG_USER_ONLY */
2584