1 /*
2  * RISC-V Control and Status Registers.
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/timer.h"
23 #include "cpu.h"
24 #include "tcg/tcg-cpu.h"
25 #include "pmu.h"
26 #include "time_helper.h"
27 #include "exec/exec-all.h"
28 #include "exec/tb-flush.h"
29 #include "sysemu/cpu-timers.h"
30 #include "qemu/guest-random.h"
31 #include "qapi/error.h"
32 
33 /* CSR function table public API */
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
35 {
36     *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
37 }
38 
void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
40 {
41     csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
42 }
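
/*
 * Example (hypothetical) use of the public API above to override a CSR
 * handler, e.g. from accelerator or board setup code:
 *
 *     riscv_csr_operations ops;
 *
 *     riscv_get_csr_ops(CSR_TIME, &ops);
 *     ops.read = my_read_time;
 *     riscv_set_csr_ops(CSR_TIME, &ops);
 *
 * where my_read_time is a hypothetical replacement read handler.  Note that
 * csrno is masked with (CSR_TABLE_SIZE - 1), so callers are expected to pass
 * a CSR number that is in range.
 */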
43 
44 /* Predicates */
45 #if !defined(CONFIG_USER_ONLY)
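/*
 * Smstateen access check: M-mode (or a CPU without Smstateen) is always
 * allowed.  Otherwise the given bit must be set in mstateen[index] (illegal
 * instruction if not), then, when running virtualized, in hstateen[index]
 * and, for VU mode, sstateen[index] (virtual instruction fault if not), and
 * finally in sstateen[index] for U-mode when S-mode is implemented (illegal
 * instruction if not).
 */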
RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit)
47 {
48     bool virt = env->virt_enabled;
49 
50     if (env->priv == PRV_M || !riscv_cpu_cfg(env)->ext_smstateen) {
51         return RISCV_EXCP_NONE;
52     }
53 
54     if (!(env->mstateen[index] & bit)) {
55         return RISCV_EXCP_ILLEGAL_INST;
56     }
57 
58     if (virt) {
59         if (!(env->hstateen[index] & bit)) {
60             return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
61         }
62 
63         if (env->priv == PRV_U && !(env->sstateen[index] & bit)) {
64             return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
65         }
66     }
67 
68     if (env->priv == PRV_U && riscv_has_ext(env, RVS)) {
69         if (!(env->sstateen[index] & bit)) {
70             return RISCV_EXCP_ILLEGAL_INST;
71         }
72     }
73 
74     return RISCV_EXCP_NONE;
75 }
76 #endif
77 
static RISCVException fs(CPURISCVState *env, int csrno)
79 {
80 #if !defined(CONFIG_USER_ONLY)
81     if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
82         !riscv_cpu_cfg(env)->ext_zfinx) {
83         return RISCV_EXCP_ILLEGAL_INST;
84     }
85 
86     if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
87         return smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR);
88     }
89 #endif
90     return RISCV_EXCP_NONE;
91 }
92 
static RISCVException vs(CPURISCVState *env, int csrno)
94 {
95     if (riscv_cpu_cfg(env)->ext_zve32x) {
96 #if !defined(CONFIG_USER_ONLY)
97         if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
98             return RISCV_EXCP_ILLEGAL_INST;
99         }
100 #endif
101         return RISCV_EXCP_NONE;
102     }
103     return RISCV_EXCP_ILLEGAL_INST;
104 }
105 
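/*
 * Predicate for the user-level counter CSRs (cycle, time, instret and
 * hpmcounter3..31, plus their RV32 *h halves).  cycle/time/instret require
 * Zicntr, while the hpmcounters must be implemented (pmu_avail_ctrs).
 * Below M-mode the counter must also be delegated via mcounteren, and for
 * guests via hcounteren (plus scounteren in VU mode), otherwise an illegal
 * instruction or virtual instruction exception is raised.
 */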
static RISCVException ctr(CPURISCVState *env, int csrno)
107 {
108 #if !defined(CONFIG_USER_ONLY)
109     RISCVCPU *cpu = env_archcpu(env);
110     int ctr_index;
111     target_ulong ctr_mask;
112     int base_csrno = CSR_CYCLE;
    bool rv32 = riscv_cpu_mxl(env) == MXL_RV32;
114 
115     if (rv32 && csrno >= CSR_CYCLEH) {
116         /* Offset for RV32 hpmcounternh counters */
117         base_csrno += 0x80;
118     }
119     ctr_index = csrno - base_csrno;
120     ctr_mask = BIT(ctr_index);
121 
122     if ((csrno >= CSR_CYCLE && csrno <= CSR_INSTRET) ||
123         (csrno >= CSR_CYCLEH && csrno <= CSR_INSTRETH)) {
124         if (!riscv_cpu_cfg(env)->ext_zicntr) {
125             return RISCV_EXCP_ILLEGAL_INST;
126         }
127 
128         goto skip_ext_pmu_check;
129     }
130 
131     if (!(cpu->pmu_avail_ctrs & ctr_mask)) {
132         /* No counter is enabled in PMU or the counter is out of range */
133         return RISCV_EXCP_ILLEGAL_INST;
134     }
135 
136 skip_ext_pmu_check:
137 
138     if (env->debugger) {
139         return RISCV_EXCP_NONE;
140     }
141 
142     if (env->priv < PRV_M && !get_field(env->mcounteren, ctr_mask)) {
143         return RISCV_EXCP_ILLEGAL_INST;
144     }
145 
146     if (env->virt_enabled) {
147         if (!get_field(env->hcounteren, ctr_mask) ||
148             (env->priv == PRV_U && !get_field(env->scounteren, ctr_mask))) {
149             return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
150         }
151     }
152 
153     if (riscv_has_ext(env, RVS) && env->priv == PRV_U &&
154         !get_field(env->scounteren, ctr_mask)) {
155         return RISCV_EXCP_ILLEGAL_INST;
156     }
157 
158 #endif
159     return RISCV_EXCP_NONE;
160 }
161 
static RISCVException ctr32(CPURISCVState *env, int csrno)
163 {
164     if (riscv_cpu_mxl(env) != MXL_RV32) {
165         return RISCV_EXCP_ILLEGAL_INST;
166     }
167 
168     return ctr(env, csrno);
169 }
170 
static RISCVException zcmt(CPURISCVState *env, int csrno)
172 {
173     if (!riscv_cpu_cfg(env)->ext_zcmt) {
174         return RISCV_EXCP_ILLEGAL_INST;
175     }
176 
177 #if !defined(CONFIG_USER_ONLY)
178     RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_JVT);
179     if (ret != RISCV_EXCP_NONE) {
180         return ret;
181     }
182 #endif
183 
184     return RISCV_EXCP_NONE;
185 }
186 
187 #if !defined(CONFIG_USER_ONLY)
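/*
 * Predicate for the machine-level programmable counters mhpmcounter3..31
 * (and their RV32 *h halves).  ctr_index is relative to mhpmcounter3 while
 * pmu_avail_ctrs is indexed by absolute counter number, hence the ">> 3"
 * when checking that the counter is actually implemented.
 */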
static RISCVException mctr(CPURISCVState *env, int csrno)
189 {
190     RISCVCPU *cpu = env_archcpu(env);
191     uint32_t pmu_avail_ctrs = cpu->pmu_avail_ctrs;
192     int ctr_index;
193     int base_csrno = CSR_MHPMCOUNTER3;
194 
195     if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) {
196         /* Offset for RV32 mhpmcounternh counters */
197         csrno -= 0x80;
198     }
199 
200     g_assert(csrno >= CSR_MHPMCOUNTER3 && csrno <= CSR_MHPMCOUNTER31);
201 
202     ctr_index = csrno - base_csrno;
203     if ((BIT(ctr_index) & pmu_avail_ctrs >> 3) == 0) {
204         /* The PMU is not enabled or counter is out of range */
205         return RISCV_EXCP_ILLEGAL_INST;
206     }
207 
208     return RISCV_EXCP_NONE;
209 }
210 
static RISCVException mctr32(CPURISCVState *env, int csrno)
212 {
213     if (riscv_cpu_mxl(env) != MXL_RV32) {
214         return RISCV_EXCP_ILLEGAL_INST;
215     }
216 
217     return mctr(env, csrno);
218 }
219 
static RISCVException sscofpmf(CPURISCVState *env, int csrno)
221 {
222     if (!riscv_cpu_cfg(env)->ext_sscofpmf) {
223         return RISCV_EXCP_ILLEGAL_INST;
224     }
225 
226     return RISCV_EXCP_NONE;
227 }
228 
static RISCVException sscofpmf_32(CPURISCVState *env, int csrno)
230 {
231     if (riscv_cpu_mxl(env) != MXL_RV32) {
232         return RISCV_EXCP_ILLEGAL_INST;
233     }
234 
235     return sscofpmf(env, csrno);
236 }
237 
static RISCVException smcntrpmf(CPURISCVState *env, int csrno)
239 {
240     if (!riscv_cpu_cfg(env)->ext_smcntrpmf) {
241         return RISCV_EXCP_ILLEGAL_INST;
242     }
243 
244     return RISCV_EXCP_NONE;
245 }
246 
static RISCVException smcntrpmf_32(CPURISCVState *env, int csrno)
248 {
249     if (riscv_cpu_mxl(env) != MXL_RV32) {
250         return RISCV_EXCP_ILLEGAL_INST;
251     }
252 
253     return smcntrpmf(env, csrno);
254 }
255 
static RISCVException any(CPURISCVState *env, int csrno)
257 {
258     return RISCV_EXCP_NONE;
259 }
260 
static RISCVException any32(CPURISCVState *env, int csrno)
262 {
263     if (riscv_cpu_mxl(env) != MXL_RV32) {
264         return RISCV_EXCP_ILLEGAL_INST;
265     }
266 
267     return any(env, csrno);
}
270 
static RISCVException aia_any(CPURISCVState *env, int csrno)
272 {
273     if (!riscv_cpu_cfg(env)->ext_smaia) {
274         return RISCV_EXCP_ILLEGAL_INST;
275     }
276 
277     return any(env, csrno);
278 }
279 
static RISCVException aia_any32(CPURISCVState *env, int csrno)
281 {
282     if (!riscv_cpu_cfg(env)->ext_smaia) {
283         return RISCV_EXCP_ILLEGAL_INST;
284     }
285 
286     return any32(env, csrno);
287 }
288 
static RISCVException smode(CPURISCVState *env, int csrno)
290 {
291     if (riscv_has_ext(env, RVS)) {
292         return RISCV_EXCP_NONE;
293     }
294 
295     return RISCV_EXCP_ILLEGAL_INST;
296 }
297 
static RISCVException smode32(CPURISCVState *env, int csrno)
299 {
300     if (riscv_cpu_mxl(env) != MXL_RV32) {
301         return RISCV_EXCP_ILLEGAL_INST;
302     }
303 
304     return smode(env, csrno);
305 }
306 
static RISCVException aia_smode(CPURISCVState *env, int csrno)
308 {
309     if (!riscv_cpu_cfg(env)->ext_ssaia) {
310         return RISCV_EXCP_ILLEGAL_INST;
311     }
312 
313     return smode(env, csrno);
314 }
315 
static RISCVException aia_smode32(CPURISCVState *env, int csrno)
317 {
318     if (!riscv_cpu_cfg(env)->ext_ssaia) {
319         return RISCV_EXCP_ILLEGAL_INST;
320     }
321 
322     return smode32(env, csrno);
323 }
324 
static RISCVException hmode(CPURISCVState *env, int csrno)
326 {
327     if (riscv_has_ext(env, RVH)) {
328         return RISCV_EXCP_NONE;
329     }
330 
331     return RISCV_EXCP_ILLEGAL_INST;
332 }
333 
static RISCVException hmode32(CPURISCVState *env, int csrno)
335 {
336     if (riscv_cpu_mxl(env) != MXL_RV32) {
337         return RISCV_EXCP_ILLEGAL_INST;
338     }
339 
340     return hmode(env, csrno);
}
343 
static RISCVException umode(CPURISCVState *env, int csrno)
345 {
346     if (riscv_has_ext(env, RVU)) {
347         return RISCV_EXCP_NONE;
348     }
349 
350     return RISCV_EXCP_ILLEGAL_INST;
351 }
352 
static RISCVException umode32(CPURISCVState *env, int csrno)
354 {
355     if (riscv_cpu_mxl(env) != MXL_RV32) {
356         return RISCV_EXCP_ILLEGAL_INST;
357     }
358 
359     return umode(env, csrno);
360 }
361 
static RISCVException mstateen(CPURISCVState *env, int csrno)
363 {
364     if (!riscv_cpu_cfg(env)->ext_smstateen) {
365         return RISCV_EXCP_ILLEGAL_INST;
366     }
367 
368     return any(env, csrno);
369 }
370 
static RISCVException hstateen_pred(CPURISCVState *env, int csrno, int base)
372 {
373     if (!riscv_cpu_cfg(env)->ext_smstateen) {
374         return RISCV_EXCP_ILLEGAL_INST;
375     }
376 
377     RISCVException ret = hmode(env, csrno);
378     if (ret != RISCV_EXCP_NONE) {
379         return ret;
380     }
381 
382     if (env->debugger) {
383         return RISCV_EXCP_NONE;
384     }
385 
386     if (env->priv < PRV_M) {
387         if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEEN)) {
388             return RISCV_EXCP_ILLEGAL_INST;
389         }
390     }
391 
392     return RISCV_EXCP_NONE;
393 }
394 
static RISCVException hstateen(CPURISCVState *env, int csrno)
396 {
397     return hstateen_pred(env, csrno, CSR_HSTATEEN0);
398 }
399 
static RISCVException hstateenh(CPURISCVState *env, int csrno)
401 {
402     return hstateen_pred(env, csrno, CSR_HSTATEEN0H);
403 }
404 
static RISCVException sstateen(CPURISCVState *env, int csrno)
406 {
407     bool virt = env->virt_enabled;
408     int index = csrno - CSR_SSTATEEN0;
409 
410     if (!riscv_cpu_cfg(env)->ext_smstateen) {
411         return RISCV_EXCP_ILLEGAL_INST;
412     }
413 
414     RISCVException ret = smode(env, csrno);
415     if (ret != RISCV_EXCP_NONE) {
416         return ret;
417     }
418 
419     if (env->debugger) {
420         return RISCV_EXCP_NONE;
421     }
422 
423     if (env->priv < PRV_M) {
424         if (!(env->mstateen[index] & SMSTATEEN_STATEEN)) {
425             return RISCV_EXCP_ILLEGAL_INST;
426         }
427 
428         if (virt) {
429             if (!(env->hstateen[index] & SMSTATEEN_STATEEN)) {
430                 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
431             }
432         }
433     }
434 
435     return RISCV_EXCP_NONE;
436 }
437 
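/*
 * Predicate for the Sstc stimecmp/vstimecmp CSRs: the Sstc extension and a
 * platform rdtime function must be present.  Below M-mode the access must
 * additionally be enabled via mcounteren.TM and menvcfg.STCE, and for
 * guests via hcounteren.TM and henvcfg.STCE.
 */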
static RISCVException sstc(CPURISCVState *env, int csrno)
439 {
440     bool hmode_check = false;
441 
442     if (!riscv_cpu_cfg(env)->ext_sstc || !env->rdtime_fn) {
443         return RISCV_EXCP_ILLEGAL_INST;
444     }
445 
446     if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) {
447         hmode_check = true;
448     }
449 
450     RISCVException ret = hmode_check ? hmode(env, csrno) : smode(env, csrno);
451     if (ret != RISCV_EXCP_NONE) {
452         return ret;
453     }
454 
455     if (env->debugger) {
456         return RISCV_EXCP_NONE;
457     }
458 
459     if (env->priv == PRV_M) {
460         return RISCV_EXCP_NONE;
461     }
462 
    /*
     * No separate function is needed for RV32 because env->menvcfg holds
     * both menvcfg and menvcfgh on RV32.
     */
467     if (!(get_field(env->mcounteren, COUNTEREN_TM) &&
468           get_field(env->menvcfg, MENVCFG_STCE))) {
469         return RISCV_EXCP_ILLEGAL_INST;
470     }
471 
472     if (env->virt_enabled) {
473         if (!(get_field(env->hcounteren, COUNTEREN_TM) &&
474               get_field(env->henvcfg, HENVCFG_STCE))) {
475             return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
476         }
477     }
478 
479     return RISCV_EXCP_NONE;
480 }
481 
static RISCVException sstc_32(CPURISCVState *env, int csrno)
483 {
484     if (riscv_cpu_mxl(env) != MXL_RV32) {
485         return RISCV_EXCP_ILLEGAL_INST;
486     }
487 
488     return sstc(env, csrno);
489 }
490 
static RISCVException satp(CPURISCVState *env, int csrno)
492 {
493     if (env->priv == PRV_S && !env->virt_enabled &&
494         get_field(env->mstatus, MSTATUS_TVM)) {
495         return RISCV_EXCP_ILLEGAL_INST;
496     }
497     if (env->priv == PRV_S && env->virt_enabled &&
498         get_field(env->hstatus, HSTATUS_VTVM)) {
499         return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
500     }
501 
502     return smode(env, csrno);
503 }
504 
static RISCVException hgatp(CPURISCVState *env, int csrno)
506 {
507     if (env->priv == PRV_S && !env->virt_enabled &&
508         get_field(env->mstatus, MSTATUS_TVM)) {
509         return RISCV_EXCP_ILLEGAL_INST;
510     }
511 
512     return hmode(env, csrno);
513 }
514 
515 /* Checks if PointerMasking registers could be accessed */
static RISCVException pointer_masking(CPURISCVState *env, int csrno)
517 {
518     /* Check if j-ext is present */
519     if (riscv_has_ext(env, RVJ)) {
520         return RISCV_EXCP_NONE;
521     }
522     return RISCV_EXCP_ILLEGAL_INST;
523 }
524 
static RISCVException aia_hmode(CPURISCVState *env, int csrno)
526 {
527     if (!riscv_cpu_cfg(env)->ext_ssaia) {
528         return RISCV_EXCP_ILLEGAL_INST;
    }

    return hmode(env, csrno);
532 }
533 
static RISCVException aia_hmode32(CPURISCVState *env, int csrno)
535 {
536     if (!riscv_cpu_cfg(env)->ext_ssaia) {
537         return RISCV_EXCP_ILLEGAL_INST;
538     }
539 
540     return hmode32(env, csrno);
541 }
542 
static RISCVException pmp(CPURISCVState *env, int csrno)
544 {
545     if (riscv_cpu_cfg(env)->pmp) {
546         if (csrno <= CSR_PMPCFG3) {
547             uint32_t reg_index = csrno - CSR_PMPCFG0;
548 
549             /* TODO: RV128 restriction check */
550             if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
551                 return RISCV_EXCP_ILLEGAL_INST;
552             }
553         }
554 
555         return RISCV_EXCP_NONE;
556     }
557 
558     return RISCV_EXCP_ILLEGAL_INST;
559 }
560 
static RISCVException have_mseccfg(CPURISCVState *env, int csrno)
562 {
563     if (riscv_cpu_cfg(env)->ext_smepmp) {
564         return RISCV_EXCP_NONE;
565     }
566     if (riscv_cpu_cfg(env)->ext_zkr) {
567         return RISCV_EXCP_NONE;
568     }
569 
570     return RISCV_EXCP_ILLEGAL_INST;
571 }
572 
static RISCVException debug(CPURISCVState *env, int csrno)
574 {
575     if (riscv_cpu_cfg(env)->debug) {
576         return RISCV_EXCP_NONE;
577     }
578 
579     return RISCV_EXCP_ILLEGAL_INST;
580 }
581 #endif
582 
static RISCVException seed(CPURISCVState *env, int csrno)
584 {
585     if (!riscv_cpu_cfg(env)->ext_zkr) {
586         return RISCV_EXCP_ILLEGAL_INST;
587     }
588 
589 #if !defined(CONFIG_USER_ONLY)
590     if (env->debugger) {
591         return RISCV_EXCP_NONE;
592     }
593 
    /*
     * With a CSR read-write instruction:
     * 1) The seed CSR is always available in machine mode as normal.
     * 2) Attempted access to seed from virtual modes VS and VU always raises
     *    an exception (a virtual instruction exception only if
     *    mseccfg.sseed=1).
     * 3) Without the corresponding access control bit set to 1, any attempted
     *    access to seed from U, S or HS modes will raise an illegal
     *    instruction exception.
     */
603     if (env->priv == PRV_M) {
604         return RISCV_EXCP_NONE;
605     } else if (env->virt_enabled) {
606         if (env->mseccfg & MSECCFG_SSEED) {
607             return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
608         } else {
609             return RISCV_EXCP_ILLEGAL_INST;
610         }
611     } else {
612         if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) {
613             return RISCV_EXCP_NONE;
614         } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) {
615             return RISCV_EXCP_NONE;
616         } else {
617             return RISCV_EXCP_ILLEGAL_INST;
618         }
619     }
620 #else
621     return RISCV_EXCP_NONE;
622 #endif
623 }
624 
625 /* User Floating-Point CSRs */
static RISCVException read_fflags(CPURISCVState *env, int csrno,
627                                   target_ulong *val)
628 {
629     *val = riscv_cpu_get_fflags(env);
630     return RISCV_EXCP_NONE;
631 }
632 
static RISCVException write_fflags(CPURISCVState *env, int csrno,
634                                    target_ulong val)
635 {
636 #if !defined(CONFIG_USER_ONLY)
637     if (riscv_has_ext(env, RVF)) {
638         env->mstatus |= MSTATUS_FS;
639     }
640 #endif
641     riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
642     return RISCV_EXCP_NONE;
643 }
644 
static RISCVException read_frm(CPURISCVState *env, int csrno,
646                                target_ulong *val)
647 {
648     *val = env->frm;
649     return RISCV_EXCP_NONE;
650 }
651 
static RISCVException write_frm(CPURISCVState *env, int csrno,
653                                 target_ulong val)
654 {
655 #if !defined(CONFIG_USER_ONLY)
656     if (riscv_has_ext(env, RVF)) {
657         env->mstatus |= MSTATUS_FS;
658     }
659 #endif
660     env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
661     return RISCV_EXCP_NONE;
662 }
663 
static RISCVException read_fcsr(CPURISCVState *env, int csrno,
665                                 target_ulong *val)
666 {
667     *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
668         | (env->frm << FSR_RD_SHIFT);
669     return RISCV_EXCP_NONE;
670 }
671 
static RISCVException write_fcsr(CPURISCVState *env, int csrno,
673                                  target_ulong val)
674 {
675 #if !defined(CONFIG_USER_ONLY)
676     if (riscv_has_ext(env, RVF)) {
677         env->mstatus |= MSTATUS_FS;
678     }
679 #endif
680     env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
681     riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
682     return RISCV_EXCP_NONE;
683 }
684 
static RISCVException read_vtype(CPURISCVState *env, int csrno,
686                                  target_ulong *val)
687 {
688     uint64_t vill;
689     switch (env->xl) {
690     case MXL_RV32:
691         vill = (uint32_t)env->vill << 31;
692         break;
693     case MXL_RV64:
694         vill = (uint64_t)env->vill << 63;
695         break;
696     default:
697         g_assert_not_reached();
698     }
699     *val = (target_ulong)vill | env->vtype;
700     return RISCV_EXCP_NONE;
701 }
702 
static RISCVException read_vl(CPURISCVState *env, int csrno,
704                               target_ulong *val)
705 {
706     *val = env->vl;
707     return RISCV_EXCP_NONE;
708 }
709 
static RISCVException read_vlenb(CPURISCVState *env, int csrno,
711                                  target_ulong *val)
712 {
713     *val = riscv_cpu_cfg(env)->vlenb;
714     return RISCV_EXCP_NONE;
715 }
716 
static RISCVException read_vxrm(CPURISCVState *env, int csrno,
718                                 target_ulong *val)
719 {
720     *val = env->vxrm;
721     return RISCV_EXCP_NONE;
722 }
723 
static RISCVException write_vxrm(CPURISCVState *env, int csrno,
725                                  target_ulong val)
726 {
727 #if !defined(CONFIG_USER_ONLY)
728     env->mstatus |= MSTATUS_VS;
729 #endif
730     env->vxrm = val;
731     return RISCV_EXCP_NONE;
732 }
733 
static RISCVException read_vxsat(CPURISCVState *env, int csrno,
735                                  target_ulong *val)
736 {
737     *val = env->vxsat;
738     return RISCV_EXCP_NONE;
739 }
740 
static RISCVException write_vxsat(CPURISCVState *env, int csrno,
742                                   target_ulong val)
743 {
744 #if !defined(CONFIG_USER_ONLY)
745     env->mstatus |= MSTATUS_VS;
746 #endif
747     env->vxsat = val;
748     return RISCV_EXCP_NONE;
749 }
750 
static RISCVException read_vstart(CPURISCVState *env, int csrno,
752                                   target_ulong *val)
753 {
754     *val = env->vstart;
755     return RISCV_EXCP_NONE;
756 }
757 
static RISCVException write_vstart(CPURISCVState *env, int csrno,
759                                    target_ulong val)
760 {
761 #if !defined(CONFIG_USER_ONLY)
762     env->mstatus |= MSTATUS_VS;
763 #endif
764     /*
765      * The vstart CSR is defined to have only enough writable bits
766      * to hold the largest element index, i.e. lg2(VLEN) bits.
767      */
768     env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlenb << 3));
769     return RISCV_EXCP_NONE;
770 }
771 
static RISCVException read_vcsr(CPURISCVState *env, int csrno,
773                                 target_ulong *val)
774 {
775     *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
776     return RISCV_EXCP_NONE;
777 }
778 
static RISCVException write_vcsr(CPURISCVState *env, int csrno,
780                                  target_ulong val)
781 {
782 #if !defined(CONFIG_USER_ONLY)
783     env->mstatus |= MSTATUS_VS;
784 #endif
785     env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
786     env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
787     return RISCV_EXCP_NONE;
788 }
789 
790 #if defined(CONFIG_USER_ONLY)
791 /* User Timers and Counters */
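/*
 * In the user-only configuration there is no machine timer or PMU model, so
 * the counter and timer CSRs are all backed directly by the host tick
 * counter; "shift" selects the upper 32 bits for the RV32 *h variants.
 */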
static target_ulong get_ticks(bool shift)
793 {
794     int64_t val = cpu_get_host_ticks();
795     target_ulong result = shift ? val >> 32 : val;
796 
797     return result;
798 }
799 
static RISCVException read_time(CPURISCVState *env, int csrno,
801                                 target_ulong *val)
802 {
803     *val = cpu_get_host_ticks();
804     return RISCV_EXCP_NONE;
805 }
806 
static RISCVException read_timeh(CPURISCVState *env, int csrno,
808                                  target_ulong *val)
809 {
810     *val = cpu_get_host_ticks() >> 32;
811     return RISCV_EXCP_NONE;
812 }
813 
static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
815                                       target_ulong *val)
816 {
817     *val = get_ticks(false);
818     return RISCV_EXCP_NONE;
819 }
820 
static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
822                                        target_ulong *val)
823 {
824     *val = get_ticks(true);
825     return RISCV_EXCP_NONE;
826 }
827 
828 #else /* CONFIG_USER_ONLY */
829 
static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno,
831                                      target_ulong *val)
832 {
833     *val = env->mcyclecfg;
834     return RISCV_EXCP_NONE;
835 }
836 
static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
838                                       target_ulong val)
839 {
840     uint64_t inh_avail_mask;
841 
842     if (riscv_cpu_mxl(env) == MXL_RV32) {
843         env->mcyclecfg = val;
844     } else {
845         /* Set xINH fields if priv mode supported */
846         inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MCYCLECFG_BIT_MINH;
847         inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFG_BIT_UINH : 0;
848         inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFG_BIT_SINH : 0;
849         inh_avail_mask |= (riscv_has_ext(env, RVH) &&
850                            riscv_has_ext(env, RVU)) ? MCYCLECFG_BIT_VUINH : 0;
851         inh_avail_mask |= (riscv_has_ext(env, RVH) &&
852                            riscv_has_ext(env, RVS)) ? MCYCLECFG_BIT_VSINH : 0;
853         env->mcyclecfg = val & inh_avail_mask;
854     }
855 
856     return RISCV_EXCP_NONE;
857 }
858 
static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno,
860                                       target_ulong *val)
861 {
862     *val = env->mcyclecfgh;
863     return RISCV_EXCP_NONE;
864 }
865 
static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno,
867                                        target_ulong val)
868 {
869     target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
870                                                  MCYCLECFGH_BIT_MINH);
871 
872     /* Set xINH fields if priv mode supported */
873     inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFGH_BIT_UINH : 0;
874     inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFGH_BIT_SINH : 0;
875     inh_avail_mask |= (riscv_has_ext(env, RVH) &&
876                        riscv_has_ext(env, RVU)) ? MCYCLECFGH_BIT_VUINH : 0;
877     inh_avail_mask |= (riscv_has_ext(env, RVH) &&
878                        riscv_has_ext(env, RVS)) ? MCYCLECFGH_BIT_VSINH : 0;
879 
880     env->mcyclecfgh = val & inh_avail_mask;
881     return RISCV_EXCP_NONE;
882 }
883 
static RISCVException read_minstretcfg(CPURISCVState *env, int csrno,
885                                        target_ulong *val)
886 {
887     *val = env->minstretcfg;
888     return RISCV_EXCP_NONE;
889 }
890 
static RISCVException write_minstretcfg(CPURISCVState *env, int csrno,
892                                         target_ulong val)
893 {
894     uint64_t inh_avail_mask;
895 
896     if (riscv_cpu_mxl(env) == MXL_RV32) {
897         env->minstretcfg = val;
898     } else {
899         inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MINSTRETCFG_BIT_MINH;
900         inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFG_BIT_UINH : 0;
901         inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFG_BIT_SINH : 0;
902         inh_avail_mask |= (riscv_has_ext(env, RVH) &&
903                            riscv_has_ext(env, RVU)) ? MINSTRETCFG_BIT_VUINH : 0;
904         inh_avail_mask |= (riscv_has_ext(env, RVH) &&
905                            riscv_has_ext(env, RVS)) ? MINSTRETCFG_BIT_VSINH : 0;
906         env->minstretcfg = val & inh_avail_mask;
907     }
908     return RISCV_EXCP_NONE;
909 }
910 
static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno,
912                                         target_ulong *val)
913 {
914     *val = env->minstretcfgh;
915     return RISCV_EXCP_NONE;
916 }
917 
static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno,
919                                          target_ulong val)
920 {
921     target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
922                                                  MINSTRETCFGH_BIT_MINH);
923 
924     inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFGH_BIT_UINH : 0;
925     inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFGH_BIT_SINH : 0;
926     inh_avail_mask |= (riscv_has_ext(env, RVH) &&
927                        riscv_has_ext(env, RVU)) ? MINSTRETCFGH_BIT_VUINH : 0;
928     inh_avail_mask |= (riscv_has_ext(env, RVH) &&
929                        riscv_has_ext(env, RVS)) ? MINSTRETCFGH_BIT_VSINH : 0;
930 
931     env->minstretcfgh = val & inh_avail_mask;
932     return RISCV_EXCP_NONE;
933 }
934 
static RISCVException read_mhpmevent(CPURISCVState *env, int csrno,
936                                      target_ulong *val)
937 {
938     int evt_index = csrno - CSR_MCOUNTINHIBIT;
939 
940     *val = env->mhpmevent_val[evt_index];
941 
942     return RISCV_EXCP_NONE;
943 }
944 
static RISCVException write_mhpmevent(CPURISCVState *env, int csrno,
946                                       target_ulong val)
947 {
948     int evt_index = csrno - CSR_MCOUNTINHIBIT;
949     uint64_t mhpmevt_val = val;
950     uint64_t inh_avail_mask;
951 
952     if (riscv_cpu_mxl(env) == MXL_RV32) {
953         env->mhpmevent_val[evt_index] = val;
954         mhpmevt_val = mhpmevt_val |
955                       ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
956     } else {
957         inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MHPMEVENT_BIT_MINH;
958         inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENT_BIT_UINH : 0;
959         inh_avail_mask |= riscv_has_ext(env, RVS) ? MHPMEVENT_BIT_SINH : 0;
960         inh_avail_mask |= (riscv_has_ext(env, RVH) &&
961                            riscv_has_ext(env, RVU)) ? MHPMEVENT_BIT_VUINH : 0;
962         inh_avail_mask |= (riscv_has_ext(env, RVH) &&
963                            riscv_has_ext(env, RVS)) ? MHPMEVENT_BIT_VSINH : 0;
964         mhpmevt_val = val & inh_avail_mask;
965         env->mhpmevent_val[evt_index] = mhpmevt_val;
966     }
967 
968     riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
969 
970     return RISCV_EXCP_NONE;
971 }
972 
static RISCVException read_mhpmeventh(CPURISCVState *env, int csrno,
974                                       target_ulong *val)
975 {
976     int evt_index = csrno - CSR_MHPMEVENT3H + 3;
977 
978     *val = env->mhpmeventh_val[evt_index];
979 
980     return RISCV_EXCP_NONE;
981 }
982 
static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
984                                        target_ulong val)
985 {
986     int evt_index = csrno - CSR_MHPMEVENT3H + 3;
987     uint64_t mhpmevth_val;
988     uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];
989     target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
990                                                   MHPMEVENTH_BIT_MINH);
991 
992     inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENTH_BIT_UINH : 0;
993     inh_avail_mask |= riscv_has_ext(env, RVS) ? MHPMEVENTH_BIT_SINH : 0;
994     inh_avail_mask |= (riscv_has_ext(env, RVH) &&
995                        riscv_has_ext(env, RVU)) ? MHPMEVENTH_BIT_VUINH : 0;
996     inh_avail_mask |= (riscv_has_ext(env, RVH) &&
997                        riscv_has_ext(env, RVS)) ? MHPMEVENTH_BIT_VSINH : 0;
998 
999     mhpmevth_val = val & inh_avail_mask;
1000     mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
1001     env->mhpmeventh_val[evt_index] = mhpmevth_val;
1002 
1003     riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
1004 
1005     return RISCV_EXCP_NONE;
1006 }
1007 
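/*
 * Return the current value of the cycle (counter_idx 0) or instret
 * (counter_idx 2) fixed counter, honoring the per-privilege xINH filter
 * bits taken from mcyclecfg(h), minstretcfg(h) or mhpmevent(h).  When no
 * filter bits are set, the raw icount / host tick value is returned
 * directly; otherwise the per-privilege snapshots are summed for every
 * privilege level that is not inhibited.
 */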
static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env,
1009                                                          int counter_idx,
1010                                                          bool upper_half)
1011 {
1012     int inst = riscv_pmu_ctr_monitor_instructions(env, counter_idx);
1013     uint64_t *counter_arr_virt = env->pmu_fixed_ctrs[inst].counter_virt;
1014     uint64_t *counter_arr = env->pmu_fixed_ctrs[inst].counter;
1015     target_ulong result = 0;
1016     uint64_t curr_val = 0;
1017     uint64_t cfg_val = 0;
1018 
1019     if (counter_idx == 0) {
1020         cfg_val = upper_half ? ((uint64_t)env->mcyclecfgh << 32) :
1021                   env->mcyclecfg;
1022     } else if (counter_idx == 2) {
1023         cfg_val = upper_half ? ((uint64_t)env->minstretcfgh << 32) :
1024                   env->minstretcfg;
1025     } else {
1026         cfg_val = upper_half ?
1027                   ((uint64_t)env->mhpmeventh_val[counter_idx] << 32) :
1028                   env->mhpmevent_val[counter_idx];
1029         cfg_val &= MHPMEVENT_FILTER_MASK;
1030     }
1031 
1032     if (!cfg_val) {
1033         if (icount_enabled()) {
            curr_val = inst ? icount_get_raw() : icount_get();
1035         } else {
1036             curr_val = cpu_get_host_ticks();
1037         }
1038 
1039         goto done;
1040     }
1041 
1042     /* Update counter before reading. */
1043     riscv_pmu_update_fixed_ctrs(env, env->priv, env->virt_enabled);
1044 
1045     if (!(cfg_val & MCYCLECFG_BIT_MINH)) {
1046         curr_val += counter_arr[PRV_M];
1047     }
1048 
1049     if (!(cfg_val & MCYCLECFG_BIT_SINH)) {
1050         curr_val += counter_arr[PRV_S];
1051     }
1052 
1053     if (!(cfg_val & MCYCLECFG_BIT_UINH)) {
1054         curr_val += counter_arr[PRV_U];
1055     }
1056 
1057     if (!(cfg_val & MCYCLECFG_BIT_VSINH)) {
1058         curr_val += counter_arr_virt[PRV_S];
1059     }
1060 
1061     if (!(cfg_val & MCYCLECFG_BIT_VUINH)) {
1062         curr_val += counter_arr_virt[PRV_U];
1063     }
1064 
1065 done:
1066     if (riscv_cpu_mxl(env) == MXL_RV32) {
1067         result = upper_half ? curr_val >> 32 : curr_val;
1068     } else {
1069         result = curr_val;
1070     }
1071 
1072     return result;
1073 }
1074 
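/*
 * Writing a counter stores the new value and, when the counter is not
 * inhibited and monitors cycles or instructions, resets the "prev" snapshot
 * so that future reads count from here; programmable counters (index > 2)
 * also re-arm the overflow timer.
 */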
static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
1076                                         target_ulong val)
1077 {
1078     int ctr_idx = csrno - CSR_MCYCLE;
1079     PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
1080     uint64_t mhpmctr_val = val;
1081 
1082     counter->mhpmcounter_val = val;
1083     if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
1084         (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
1085          riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
1086         counter->mhpmcounter_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
1087                                                                 ctr_idx, false);
1088         if (ctr_idx > 2) {
1089             if (riscv_cpu_mxl(env) == MXL_RV32) {
1090                 mhpmctr_val = mhpmctr_val |
1091                               ((uint64_t)counter->mhpmcounterh_val << 32);
1092             }
1093             riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
1094         }
    } else {
1096         /* Other counters can keep incrementing from the given value */
1097         counter->mhpmcounter_prev = val;
1098     }
1099 
1100     return RISCV_EXCP_NONE;
1101 }
1102 
static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
1104                                          target_ulong val)
1105 {
1106     int ctr_idx = csrno - CSR_MCYCLEH;
1107     PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
1108     uint64_t mhpmctr_val = counter->mhpmcounter_val;
1109     uint64_t mhpmctrh_val = val;
1110 
1111     counter->mhpmcounterh_val = val;
1112     mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
1113     if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
1114         (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
1115          riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
1116         counter->mhpmcounterh_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
1117                                                                  ctr_idx, true);
1118         if (ctr_idx > 2) {
1119             riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
1120         }
1121     } else {
1122         counter->mhpmcounterh_prev = val;
1123     }
1124 
1125     return RISCV_EXCP_NONE;
1126 }
1127 
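/*
 * Read back a counter: unless the counter is inhibited, counters that
 * monitor cycles or instructions return the stored mhpmcounter(h)_val plus
 * the delta accumulated by the underlying fixed counter since the "prev"
 * snapshot was taken at the last write; other counters simply return the
 * stored value.
 */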
RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
1129                                          bool upper_half, uint32_t ctr_idx)
1130 {
1131     PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
1132     target_ulong ctr_prev = upper_half ? counter->mhpmcounterh_prev :
1133                                          counter->mhpmcounter_prev;
1134     target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val :
1135                                         counter->mhpmcounter_val;
1136 
1137     if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
1138         /*
1139          * Counter should not increment if inhibit bit is set. Just return the
1140          * current counter value.
1141          */
        *val = ctr_val;
        return RISCV_EXCP_NONE;
1144     }
1145 
1146     /*
1147      * The kernel computes the perf delta by subtracting the current value from
1148      * the value it initialized previously (ctr_val).
1149      */
1150     if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
1151         riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
1152         *val = riscv_pmu_ctr_get_fixed_counters_val(env, ctr_idx, upper_half) -
1153                                                     ctr_prev + ctr_val;
1154     } else {
1155         *val = ctr_val;
1156     }
1157 
1158     return RISCV_EXCP_NONE;
1159 }
1160 
static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
1162                                       target_ulong *val)
1163 {
1164     uint16_t ctr_index;
1165 
1166     if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
1167         ctr_index = csrno - CSR_MCYCLE;
1168     } else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
1169         ctr_index = csrno - CSR_CYCLE;
1170     } else {
1171         return RISCV_EXCP_ILLEGAL_INST;
1172     }
1173 
1174     return riscv_pmu_read_ctr(env, val, false, ctr_index);
1175 }
1176 
static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
1178                                        target_ulong *val)
1179 {
1180     uint16_t ctr_index;
1181 
1182     if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) {
1183         ctr_index = csrno - CSR_MCYCLEH;
1184     } else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) {
1185         ctr_index = csrno - CSR_CYCLEH;
1186     } else {
1187         return RISCV_EXCP_ILLEGAL_INST;
1188     }
1189 
1190     return riscv_pmu_read_ctr(env, val, true, ctr_index);
1191 }
1192 
static RISCVException read_scountovf(CPURISCVState *env, int csrno,
1194                                      target_ulong *val)
1195 {
1196     int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT;
1197     int i;
1198     *val = 0;
1199     target_ulong *mhpm_evt_val;
1200     uint64_t of_bit_mask;
1201 
1202     if (riscv_cpu_mxl(env) == MXL_RV32) {
1203         mhpm_evt_val = env->mhpmeventh_val;
1204         of_bit_mask = MHPMEVENTH_BIT_OF;
1205     } else {
1206         mhpm_evt_val = env->mhpmevent_val;
1207         of_bit_mask = MHPMEVENT_BIT_OF;
1208     }
1209 
    for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) {
        if ((get_field(env->mcounteren, BIT(i))) &&
            (mhpm_evt_val[i] & of_bit_mask)) {
            *val |= BIT(i);
        }
    }
1216 
1217     return RISCV_EXCP_NONE;
1218 }
1219 
static RISCVException read_time(CPURISCVState *env, int csrno,
1221                                 target_ulong *val)
1222 {
1223     uint64_t delta = env->virt_enabled ? env->htimedelta : 0;
1224 
1225     if (!env->rdtime_fn) {
1226         return RISCV_EXCP_ILLEGAL_INST;
1227     }
1228 
1229     *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
1230     return RISCV_EXCP_NONE;
1231 }
1232 
static RISCVException read_timeh(CPURISCVState *env, int csrno,
1234                                  target_ulong *val)
1235 {
1236     uint64_t delta = env->virt_enabled ? env->htimedelta : 0;
1237 
1238     if (!env->rdtime_fn) {
1239         return RISCV_EXCP_ILLEGAL_INST;
1240     }
1241 
1242     *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
1243     return RISCV_EXCP_NONE;
1244 }
1245 
static RISCVException read_vstimecmp(CPURISCVState *env, int csrno,
1247                                      target_ulong *val)
1248 {
1249     *val = env->vstimecmp;
1250 
1251     return RISCV_EXCP_NONE;
1252 }
1253 
static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
1255                                       target_ulong *val)
1256 {
1257     *val = env->vstimecmp >> 32;
1258 
1259     return RISCV_EXCP_NONE;
1260 }
1261 
static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
1263                                       target_ulong val)
1264 {
1265     if (riscv_cpu_mxl(env) == MXL_RV32) {
1266         env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
1267     } else {
1268         env->vstimecmp = val;
1269     }
1270 
1271     riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
1272                               env->htimedelta, MIP_VSTIP);
1273 
1274     return RISCV_EXCP_NONE;
1275 }
1276 
static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
1278                                        target_ulong val)
1279 {
1280     env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
1281     riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
1282                               env->htimedelta, MIP_VSTIP);
1283 
1284     return RISCV_EXCP_NONE;
1285 }
1286 
static RISCVException read_stimecmp(CPURISCVState *env, int csrno,
1288                                     target_ulong *val)
1289 {
1290     if (env->virt_enabled) {
1291         *val = env->vstimecmp;
1292     } else {
1293         *val = env->stimecmp;
1294     }
1295 
1296     return RISCV_EXCP_NONE;
1297 }
1298 
static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
1300                                      target_ulong *val)
1301 {
1302     if (env->virt_enabled) {
1303         *val = env->vstimecmp >> 32;
1304     } else {
1305         *val = env->stimecmp >> 32;
1306     }
1307 
1308     return RISCV_EXCP_NONE;
1309 }
1310 
static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
1312                                      target_ulong val)
1313 {
1314     if (env->virt_enabled) {
1315         if (env->hvictl & HVICTL_VTI) {
1316             return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1317         }
1318         return write_vstimecmp(env, csrno, val);
1319     }
1320 
1321     if (riscv_cpu_mxl(env) == MXL_RV32) {
1322         env->stimecmp = deposit64(env->stimecmp, 0, 32, (uint64_t)val);
1323     } else {
1324         env->stimecmp = val;
1325     }
1326 
1327     riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);
1328 
1329     return RISCV_EXCP_NONE;
1330 }
1331 
static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
1333                                       target_ulong val)
1334 {
1335     if (env->virt_enabled) {
1336         if (env->hvictl & HVICTL_VTI) {
1337             return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1338         }
1339         return write_vstimecmph(env, csrno, val);
1340     }
1341 
1342     env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
1343     riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);
1344 
1345     return RISCV_EXCP_NONE;
1346 }
1347 
1348 #define VSTOPI_NUM_SRCS 5
1349 
/*
 * All core local interrupts except the fixed ones 0:12. This macro is used
 * by the virtual interrupt logic, so please don't change it, to avoid
 * breaking the whole support. For reference see AIA spec: `5.3 Interrupt
 * filtering and virtual interrupts for supervisor level` and `6.3.2 Virtual
 * interrupts for VS level`.
 */
1357 #define LOCAL_INTERRUPTS   (~0x1FFFULL)
1358 
1359 static const uint64_t delegable_ints =
1360     S_MODE_INTERRUPTS | VS_MODE_INTERRUPTS | MIP_LCOFIP;
1361 static const uint64_t vs_delegable_ints =
1362     (VS_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & ~MIP_LCOFIP;
1363 static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
1364                                      HS_MODE_INTERRUPTS | LOCAL_INTERRUPTS;
1365 #define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
1366                          (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
1367                          (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
1368                          (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
1369                          (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
1370                          (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
1371                          (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
1372                          (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
1373                          (1ULL << (RISCV_EXCP_U_ECALL)) | \
1374                          (1ULL << (RISCV_EXCP_S_ECALL)) | \
1375                          (1ULL << (RISCV_EXCP_VS_ECALL)) | \
1376                          (1ULL << (RISCV_EXCP_M_ECALL)) | \
1377                          (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
1378                          (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
1379                          (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
1380                          (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
1381                          (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
1382                          (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
1383                          (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
1384 static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
1385     ~((1ULL << (RISCV_EXCP_S_ECALL)) |
1386       (1ULL << (RISCV_EXCP_VS_ECALL)) |
1387       (1ULL << (RISCV_EXCP_M_ECALL)) |
1388       (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
1389       (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
1390       (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
1391       (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
1392 static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
1393     SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
1394     SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
1395 
/*
 * The spec allows bits 13:63 to be either read-only or writable.
 * So far the only interrupt we have in that region, LCOFIP, is writable.
 *
 * The spec also allows virtual interrupts to be injected in this region
 * even without any hardware interrupt backing that interrupt number.
 *
 * For now the interrupts in the 13:63 region are all kept writable, 13
 * being LCOFIP and 14:63 being virtual only. Change this in the future if
 * we introduce more interrupts that are not writable.
 */
1407 
/* Bit STIP can be an alias of mip.STIP, which is why it's writable in mvip. */
1409 static const uint64_t mvip_writable_mask = MIP_SSIP | MIP_STIP | MIP_SEIP |
1410                                     LOCAL_INTERRUPTS;
1411 static const uint64_t mvien_writable_mask = MIP_SSIP | MIP_SEIP |
1412                                     LOCAL_INTERRUPTS;
1413 
1414 static const uint64_t sip_writable_mask = SIP_SSIP | LOCAL_INTERRUPTS;
1415 static const uint64_t hip_writable_mask = MIP_VSSIP;
1416 static const uint64_t hvip_writable_mask = MIP_VSSIP | MIP_VSTIP |
1417                                     MIP_VSEIP | LOCAL_INTERRUPTS;
1418 static const uint64_t hvien_writable_mask = LOCAL_INTERRUPTS;
1419 
1420 static const uint64_t vsip_writable_mask = MIP_VSSIP | LOCAL_INTERRUPTS;
1421 
1422 const bool valid_vm_1_10_32[16] = {
1423     [VM_1_10_MBARE] = true,
1424     [VM_1_10_SV32] = true
1425 };
1426 
1427 const bool valid_vm_1_10_64[16] = {
1428     [VM_1_10_MBARE] = true,
1429     [VM_1_10_SV39] = true,
1430     [VM_1_10_SV48] = true,
1431     [VM_1_10_SV57] = true
1432 };
1433 
1434 /* Machine Information Registers */
static RISCVException read_zero(CPURISCVState *env, int csrno,
1436                                 target_ulong *val)
1437 {
1438     *val = 0;
1439     return RISCV_EXCP_NONE;
1440 }
1441 
static RISCVException write_ignore(CPURISCVState *env, int csrno,
1443                                    target_ulong val)
1444 {
1445     return RISCV_EXCP_NONE;
1446 }
1447 
static RISCVException read_mvendorid(CPURISCVState *env, int csrno,
1449                                      target_ulong *val)
1450 {
1451     *val = riscv_cpu_cfg(env)->mvendorid;
1452     return RISCV_EXCP_NONE;
1453 }
1454 
static RISCVException read_marchid(CPURISCVState *env, int csrno,
1456                                    target_ulong *val)
1457 {
1458     *val = riscv_cpu_cfg(env)->marchid;
1459     return RISCV_EXCP_NONE;
1460 }
1461 
static RISCVException read_mimpid(CPURISCVState *env, int csrno,
1463                                   target_ulong *val)
1464 {
1465     *val = riscv_cpu_cfg(env)->mimpid;
1466     return RISCV_EXCP_NONE;
1467 }
1468 
static RISCVException read_mhartid(CPURISCVState *env, int csrno,
1470                                    target_ulong *val)
1471 {
1472     *val = env->mhartid;
1473     return RISCV_EXCP_NONE;
1474 }
1475 
1476 /* Machine Trap Setup */
1477 
1478 /* We do not store SD explicitly, only compute it on demand. */
static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
1480 {
1481     if ((status & MSTATUS_FS) == MSTATUS_FS ||
1482         (status & MSTATUS_VS) == MSTATUS_VS ||
1483         (status & MSTATUS_XS) == MSTATUS_XS) {
1484         switch (xl) {
1485         case MXL_RV32:
1486             return status | MSTATUS32_SD;
1487         case MXL_RV64:
1488             return status | MSTATUS64_SD;
1489         case MXL_RV128:
1490             return MSTATUSH128_SD;
1491         default:
1492             g_assert_not_reached();
1493         }
1494     }
1495     return status;
1496 }
1497 
static RISCVException read_mstatus(CPURISCVState *env, int csrno,
1499                                    target_ulong *val)
1500 {
1501     *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
1502     return RISCV_EXCP_NONE;
1503 }
1504 
static bool validate_vm(CPURISCVState *env, target_ulong vm)
1506 {
1507     uint64_t mode_supported = riscv_cpu_cfg(env)->satp_mode.map;
1508     return get_field(mode_supported, (1 << vm));
1509 }
1510 
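/*
 * Legalize an xatp (satp-style address translation CSR) write: the new
 * value is only accepted (and the TLB flushed) when the requested
 * translation mode is supported and at least one of the MODE/ASID/PPN
 * fields actually changes; otherwise the old value is kept.
 */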
static target_ulong legalize_xatp(CPURISCVState *env, target_ulong old_xatp,
1512                                   target_ulong val)
1513 {
1514     target_ulong mask;
1515     bool vm;
1516     if (riscv_cpu_mxl(env) == MXL_RV32) {
1517         vm = validate_vm(env, get_field(val, SATP32_MODE));
1518         mask = (val ^ old_xatp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
1519     } else {
1520         vm = validate_vm(env, get_field(val, SATP64_MODE));
1521         mask = (val ^ old_xatp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
1522     }
1523 
1524     if (vm && mask) {
1525         /*
1526          * The ISA defines SATP.MODE=Bare as "no translation", but we still
1527          * pass these through QEMU's TLB emulation as it improves
1528          * performance.  Flushing the TLB on SATP writes with paging
1529          * enabled avoids leaking those invalid cached mappings.
1530          */
1531         tlb_flush(env_cpu(env));
1532         return val;
1533     }
1534     return old_xatp;
1535 }
1536 
static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp,
1538                                  target_ulong val)
1539 {
1540     bool valid = false;
1541     target_ulong new_mpp = get_field(val, MSTATUS_MPP);
1542 
1543     switch (new_mpp) {
1544     case PRV_M:
1545         valid = true;
1546         break;
1547     case PRV_S:
1548         valid = riscv_has_ext(env, RVS);
1549         break;
1550     case PRV_U:
1551         valid = riscv_has_ext(env, RVU);
1552         break;
1553     }
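    /*
     * Example: on a hart without the U extension, an attempt to set
     * MPP = PRV_U is considered invalid and the previous MPP is kept below.
     */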
1554 
1555     /* Leave the field unchanged if the new_mpp value is invalid */
1556     if (!valid) {
1557         val = set_field(val, MSTATUS_MPP, old_mpp);
1558     }
1559 
1560     return val;
1561 }
1562 
1563 static RISCVException write_mstatus(CPURISCVState *env, int csrno,
1564                                     target_ulong val)
1565 {
1566     uint64_t mstatus = env->mstatus;
1567     uint64_t mask = 0;
1568     RISCVMXL xl = riscv_cpu_mxl(env);
1569 
1570     /*
1571      * The MPP field has been WARL since priv version 1.11. However,
1572      * legalizing it will not break any software running on 1.10.
1573      */
1574     val = legalize_mpp(env, get_field(mstatus, MSTATUS_MPP), val);
1575 
1576     /* flush tlb on mstatus fields that affect VM */
1577     if ((val ^ mstatus) & MSTATUS_MXR) {
1578         tlb_flush(env_cpu(env));
1579     }
1580     mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
1581         MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
1582         MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
1583         MSTATUS_TW;
1584 
1585     if (riscv_has_ext(env, RVF)) {
1586         mask |= MSTATUS_FS;
1587     }
1588     if (riscv_has_ext(env, RVV)) {
1589         mask |= MSTATUS_VS;
1590     }
1591 
1592     if (xl != MXL_RV32 || env->debugger) {
1593         if (riscv_has_ext(env, RVH)) {
1594             mask |= MSTATUS_MPV | MSTATUS_GVA;
1595         }
1596         if ((val & MSTATUS64_UXL) != 0) {
1597             mask |= MSTATUS64_UXL;
1598         }
1599     }
1600 
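    /* Bits outside the accumulated mask are preserved from the old mstatus. */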
1601     mstatus = (mstatus & ~mask) | (val & mask);
1602 
1603     env->mstatus = mstatus;
1604 
1605     /*
1606      * Except in debug mode, UXL/SXL can only be modified by a higher
1607      * privilege mode, so xl will not change in normal mode.
1608      */
1609     if (env->debugger) {
1610         env->xl = cpu_recompute_xl(env);
1611     }
1612 
1613     riscv_cpu_update_mask(env);
1614     return RISCV_EXCP_NONE;
1615 }
1616 
1617 static RISCVException read_mstatush(CPURISCVState *env, int csrno,
1618                                     target_ulong *val)
1619 {
1620     *val = env->mstatus >> 32;
1621     return RISCV_EXCP_NONE;
1622 }
1623 
1624 static RISCVException write_mstatush(CPURISCVState *env, int csrno,
1625                                      target_ulong val)
1626 {
1627     uint64_t valh = (uint64_t)val << 32;
1628     uint64_t mask = riscv_has_ext(env, RVH) ? MSTATUS_MPV | MSTATUS_GVA : 0;
1629 
1630     env->mstatus = (env->mstatus & ~mask) | (valh & mask);
1631 
1632     return RISCV_EXCP_NONE;
1633 }
1634 
1635 static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
1636                                         Int128 *val)
1637 {
1638     *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128,
1639                                                       env->mstatus));
1640     return RISCV_EXCP_NONE;
1641 }
1642 
1643 static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
1644                                      Int128 *val)
1645 {
1646     *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
1647     return RISCV_EXCP_NONE;
1648 }
1649 
1650 static RISCVException read_misa(CPURISCVState *env, int csrno,
1651                                 target_ulong *val)
1652 {
1653     target_ulong misa;
1654 
1655     switch (env->misa_mxl) {
1656     case MXL_RV32:
1657         misa = (target_ulong)MXL_RV32 << 30;
1658         break;
1659 #ifdef TARGET_RISCV64
1660     case MXL_RV64:
1661         misa = (target_ulong)MXL_RV64 << 62;
1662         break;
1663 #endif
1664     default:
1665         g_assert_not_reached();
1666     }
1667 
1668     *val = misa | env->misa_ext;
1669     return RISCV_EXCP_NONE;
1670 }
1671 
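/*
 * misa is WARL; writes are only honoured when the misa_w config flag is
 * set.  Example: a write that clears any of I/M/A/F/D also clears G
 * below, and a write that fails extension validation is rolled back.
 */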
1672 static RISCVException write_misa(CPURISCVState *env, int csrno,
1673                                  target_ulong val)
1674 {
1675     RISCVCPU *cpu = env_archcpu(env);
1676     uint32_t orig_misa_ext = env->misa_ext;
1677     Error *local_err = NULL;
1678 
1679     if (!riscv_cpu_cfg(env)->misa_w) {
1680         /* drop write to misa */
1681         return RISCV_EXCP_NONE;
1682     }
1683 
1684     /* Mask extensions that are not supported by this hart */
1685     val &= env->misa_ext_mask;
1686 
1687     /*
1688      * Suppress 'C' if next instruction is not aligned
1689      * TODO: this should check next_pc
1690      */
1691     if ((val & RVC) && (GETPC() & ~3) != 0) {
1692         val &= ~RVC;
1693     }
1694 
1695     /* Disable RVG if any of its dependencies are disabled */
1696     if (!(val & RVI && val & RVM && val & RVA &&
1697           val & RVF && val & RVD)) {
1698         val &= ~RVG;
1699     }
1700 
1701     /* If nothing changed, do nothing. */
1702     if (val == env->misa_ext) {
1703         return RISCV_EXCP_NONE;
1704     }
1705 
1706     env->misa_ext = val;
1707     riscv_cpu_validate_set_extensions(cpu, &local_err);
1708     if (local_err != NULL) {
1709         /* Rollback on validation error */
1710         qemu_log_mask(LOG_GUEST_ERROR, "Unable to write MISA ext value "
1711                       "0x%x, keeping existing MISA ext 0x%x\n",
1712                       env->misa_ext, orig_misa_ext);
1713 
1714         env->misa_ext = orig_misa_ext;
1715 
1716         return RISCV_EXCP_NONE;
1717     }
1718 
1719     if (!(env->misa_ext & RVF)) {
1720         env->mstatus &= ~MSTATUS_FS;
1721     }
1722 
1723     /* flush translation cache */
1724     tb_flush(env_cpu(env));
1725     env->xl = riscv_cpu_mxl(env);
1726     return RISCV_EXCP_NONE;
1727 }
1728 
1729 static RISCVException read_medeleg(CPURISCVState *env, int csrno,
1730                                    target_ulong *val)
1731 {
1732     *val = env->medeleg;
1733     return RISCV_EXCP_NONE;
1734 }
1735 
1736 static RISCVException write_medeleg(CPURISCVState *env, int csrno,
1737                                     target_ulong val)
1738 {
1739     env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
1740     return RISCV_EXCP_NONE;
1741 }
1742 
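/*
 * mideleg writes are masked by delegable_ints; with the H extension the
 * HS-level interrupt bits are forced to 1, i.e. they are always delegated.
 */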
1743 static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno,
1744                                     uint64_t *ret_val,
1745                                     uint64_t new_val, uint64_t wr_mask)
1746 {
1747     uint64_t mask = wr_mask & delegable_ints;
1748 
1749     if (ret_val) {
1750         *ret_val = env->mideleg;
1751     }
1752 
1753     env->mideleg = (env->mideleg & ~mask) | (new_val & mask);
1754 
1755     if (riscv_has_ext(env, RVH)) {
1756         env->mideleg |= HS_MODE_INTERRUPTS;
1757     }
1758 
1759     return RISCV_EXCP_NONE;
1760 }
1761 
1762 static RISCVException rmw_mideleg(CPURISCVState *env, int csrno,
1763                                   target_ulong *ret_val,
1764                                   target_ulong new_val, target_ulong wr_mask)
1765 {
1766     uint64_t rval;
1767     RISCVException ret;
1768 
1769     ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask);
1770     if (ret_val) {
1771         *ret_val = rval;
1772     }
1773 
1774     return ret;
1775 }
1776 
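/*
 * The *h variants below give RV32 access to the upper 32 bits of the
 * 64-bit state: new_val and wr_mask are shifted up before calling the
 * 64-bit helper, and the returned value is shifted back down.
 */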
1777 static RISCVException rmw_midelegh(CPURISCVState *env, int csrno,
1778                                    target_ulong *ret_val,
1779                                    target_ulong new_val,
1780                                    target_ulong wr_mask)
1781 {
1782     uint64_t rval;
1783     RISCVException ret;
1784 
1785     ret = rmw_mideleg64(env, csrno, &rval,
1786         ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1787     if (ret_val) {
1788         *ret_val = rval >> 32;
1789     }
1790 
1791     return ret;
1792 }
1793 
1794 static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
1795                                 uint64_t *ret_val,
1796                                 uint64_t new_val, uint64_t wr_mask)
1797 {
1798     uint64_t mask = wr_mask & all_ints;
1799 
1800     if (ret_val) {
1801         *ret_val = env->mie;
1802     }
1803 
1804     env->mie = (env->mie & ~mask) | (new_val & mask);
1805 
1806     if (!riscv_has_ext(env, RVH)) {
1807         env->mie &= ~((uint64_t)HS_MODE_INTERRUPTS);
1808     }
1809 
1810     return RISCV_EXCP_NONE;
1811 }
1812 
1813 static RISCVException rmw_mie(CPURISCVState *env, int csrno,
1814                               target_ulong *ret_val,
1815                               target_ulong new_val, target_ulong wr_mask)
1816 {
1817     uint64_t rval;
1818     RISCVException ret;
1819 
1820     ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask);
1821     if (ret_val) {
1822         *ret_val = rval;
1823     }
1824 
1825     return ret;
1826 }
1827 
1828 static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
1829                                target_ulong *ret_val,
1830                                target_ulong new_val, target_ulong wr_mask)
1831 {
1832     uint64_t rval;
1833     RISCVException ret;
1834 
1835     ret = rmw_mie64(env, csrno, &rval,
1836         ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1837     if (ret_val) {
1838         *ret_val = rval >> 32;
1839     }
1840 
1841     return ret;
1842 }
1843 
1844 static RISCVException rmw_mvien64(CPURISCVState *env, int csrno,
1845                                 uint64_t *ret_val,
1846                                 uint64_t new_val, uint64_t wr_mask)
1847 {
1848     uint64_t mask = wr_mask & mvien_writable_mask;
1849 
1850     if (ret_val) {
1851         *ret_val = env->mvien;
1852     }
1853 
1854     env->mvien = (env->mvien & ~mask) | (new_val & mask);
1855 
1856     return RISCV_EXCP_NONE;
1857 }
1858 
1859 static RISCVException rmw_mvien(CPURISCVState *env, int csrno,
1860                               target_ulong *ret_val,
1861                               target_ulong new_val, target_ulong wr_mask)
1862 {
1863     uint64_t rval;
1864     RISCVException ret;
1865 
1866     ret = rmw_mvien64(env, csrno, &rval, new_val, wr_mask);
1867     if (ret_val) {
1868         *ret_val = rval;
1869     }
1870 
1871     return ret;
1872 }
1873 
1874 static RISCVException rmw_mvienh(CPURISCVState *env, int csrno,
1875                                 target_ulong *ret_val,
1876                                 target_ulong new_val, target_ulong wr_mask)
1877 {
1878     uint64_t rval;
1879     RISCVException ret;
1880 
1881     ret = rmw_mvien64(env, csrno, &rval,
1882         ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1883     if (ret_val) {
1884         *ret_val = rval >> 32;
1885     }
1886 
1887     return ret;
1888 }
1889 
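/*
 * Per the AIA spec, mtopi reports the highest-priority pending M-level
 * interrupt: the interrupt number in bits 27:16 and its priority in
 * bits 7:0; a value of zero means nothing is pending for M-mode.
 */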
1890 static RISCVException read_mtopi(CPURISCVState *env, int csrno,
1891                                  target_ulong *val)
1892 {
1893     int irq;
1894     uint8_t iprio;
1895 
1896     irq = riscv_cpu_mirq_pending(env);
1897     if (irq <= 0 || irq > 63) {
1898         *val = 0;
1899     } else {
1900         iprio = env->miprio[irq];
1901         if (!iprio) {
1902             if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) {
1903                 iprio = IPRIO_MMAXIPRIO;
1904             }
1905         }
1906         *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
1907         *val |= iprio;
1908     }
1909 
1910     return RISCV_EXCP_NONE;
1911 }
1912 
1913 static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
1914 {
1915     if (!env->virt_enabled) {
1916         return csrno;
1917     }
1918 
1919     switch (csrno) {
1920     case CSR_SISELECT:
1921         return CSR_VSISELECT;
1922     case CSR_SIREG:
1923         return CSR_VSIREG;
1924     case CSR_STOPEI:
1925         return CSR_VSTOPEI;
1926     default:
1927         return csrno;
1928     };
1929 }
1930 
1931 static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
1932                                    target_ulong *val, target_ulong new_val,
1933                                    target_ulong wr_mask)
1934 {
1935     target_ulong *iselect;
1936 
1937     /* Translate CSR number for VS-mode */
1938     csrno = aia_xlate_vs_csrno(env, csrno);
1939 
1940     /* Find the iselect CSR based on CSR number */
1941     switch (csrno) {
1942     case CSR_MISELECT:
1943         iselect = &env->miselect;
1944         break;
1945     case CSR_SISELECT:
1946         iselect = &env->siselect;
1947         break;
1948     case CSR_VSISELECT:
1949         iselect = &env->vsiselect;
1950         break;
1951     default:
1952          return RISCV_EXCP_ILLEGAL_INST;
1953     };
1954 
1955     if (val) {
1956         *val = *iselect;
1957     }
1958 
1959     wr_mask &= ISELECT_MASK;
1960     if (wr_mask) {
1961         *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
1962     }
1963 
1964     return RISCV_EXCP_NONE;
1965 }
1966 
1967 static int rmw_iprio(target_ulong xlen,
1968                      target_ulong iselect, uint8_t *iprio,
1969                      target_ulong *val, target_ulong new_val,
1970                      target_ulong wr_mask, int ext_irq_no)
1971 {
1972     int i, firq, nirqs;
1973     target_ulong old_val;
1974 
1975     if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) {
1976         return -EINVAL;
1977     }
1978     if (xlen != 32 && iselect & 0x1) {
1979         return -EINVAL;
1980     }
1981 
1982     nirqs = 4 * (xlen / 32);
1983     firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs);
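    /*
     * Each iprio register holds xlen/8 byte-wide priorities.  Example on
     * RV64: iselect == ISELECT_IPRIO0 + 2 gives firq = 8 and nirqs = 8,
     * i.e. this access covers interrupts 8..15.
     */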
1984 
1985     old_val = 0;
1986     for (i = 0; i < nirqs; i++) {
1987         old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i);
1988     }
1989 
1990     if (val) {
1991         *val = old_val;
1992     }
1993 
1994     if (wr_mask) {
1995         new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
1996         for (i = 0; i < nirqs; i++) {
1997             /*
1998              * M-level and S-level external IRQ priority is always read-only
1999              * zero. This means the default priority order is always preferred
2000              * for M-level and S-level external IRQs.
2001              */
2002             if ((firq + i) == ext_irq_no) {
2003                 continue;
2004             }
2005             iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff;
2006         }
2007     }
2008 
2009     return 0;
2010 }
2011 
2012 static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
2013                                 target_ulong *val, target_ulong new_val,
2014                                 target_ulong wr_mask)
2015 {
2016     bool virt, isel_reserved;
2017     uint8_t *iprio;
2018     int ret = -EINVAL;
2019     target_ulong priv, isel, vgein;
2020 
2021     /* Translate CSR number for VS-mode */
2022     csrno = aia_xlate_vs_csrno(env, csrno);
2023 
2024     /* Decode register details from CSR number */
2025     virt = false;
2026     isel_reserved = false;
2027     switch (csrno) {
2028     case CSR_MIREG:
2029         iprio = env->miprio;
2030         isel = env->miselect;
2031         priv = PRV_M;
2032         break;
2033     case CSR_SIREG:
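        /*
         * When mvien.SEIP is set, S-mode may not access the IMSIC
         * interrupt-file range (EIDELIVERY..EIE63) through siselect/sireg;
         * such accesses are rejected via the error path below.
         */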
2034         if (env->priv == PRV_S && env->mvien & MIP_SEIP &&
2035             env->siselect >= ISELECT_IMSIC_EIDELIVERY &&
2036             env->siselect <= ISELECT_IMSIC_EIE63) {
2037             goto done;
2038         }
2039         iprio = env->siprio;
2040         isel = env->siselect;
2041         priv = PRV_S;
2042         break;
2043     case CSR_VSIREG:
2044         iprio = env->hviprio;
2045         isel = env->vsiselect;
2046         priv = PRV_S;
2047         virt = true;
2048         break;
2049     default:
2050          goto done;
2051     };
2052 
2053     /* Find the selected guest interrupt file */
2054     vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
2055 
2056     if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) {
2057         /* Local interrupt priority registers not available for VS-mode */
2058         if (!virt) {
2059             ret = rmw_iprio(riscv_cpu_mxl_bits(env),
2060                             isel, iprio, val, new_val, wr_mask,
2061                             (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT);
2062         }
2063     } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) {
2064         /* IMSIC registers are only available when the machine implements an IMSIC. */
2065         if (env->aia_ireg_rmw_fn[priv]) {
2066             /* Selected guest interrupt file should not be zero */
2067             if (virt && (!vgein || env->geilen < vgein)) {
2068                 goto done;
2069             }
2070             /* Call machine specific IMSIC register emulation */
2071             ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
2072                                     AIA_MAKE_IREG(isel, priv, virt, vgein,
2073                                                   riscv_cpu_mxl_bits(env)),
2074                                     val, new_val, wr_mask);
2075         }
2076     } else {
2077         isel_reserved = true;
2078     }
2079 
2080 done:
2081     if (ret) {
2082         return (env->virt_enabled && virt && !isel_reserved) ?
2083                RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
2084     }
2085     return RISCV_EXCP_NONE;
2086 }
2087 
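/*
 * mtopei/stopei/vstopei expose the top pending external interrupt of the
 * corresponding IMSIC interrupt file; reads and writes are forwarded to
 * the machine's aia_ireg_rmw_fn callback with ISELECT_IMSIC_TOPEI selected.
 */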
2088 static RISCVException rmw_xtopei(CPURISCVState *env, int csrno,
2089                                  target_ulong *val, target_ulong new_val,
2090                                  target_ulong wr_mask)
2091 {
2092     bool virt;
2093     int ret = -EINVAL;
2094     target_ulong priv, vgein;
2095 
2096     /* Translate CSR number for VS-mode */
2097     csrno = aia_xlate_vs_csrno(env, csrno);
2098 
2099     /* Decode register details from CSR number */
2100     virt = false;
2101     switch (csrno) {
2102     case CSR_MTOPEI:
2103         priv = PRV_M;
2104         break;
2105     case CSR_STOPEI:
2106         if (env->mvien & MIP_SEIP && env->priv == PRV_S) {
2107             goto done;
2108         }
2109         priv = PRV_S;
2110         break;
2111     case CSR_VSTOPEI:
2112         priv = PRV_S;
2113         virt = true;
2114         break;
2115     default:
2116         goto done;
2117     };
2118 
2119     /* IMSIC CSRs are only available when the machine implements an IMSIC. */
2120     if (!env->aia_ireg_rmw_fn[priv]) {
2121         goto done;
2122     }
2123 
2124     /* Find the selected guest interrupt file */
2125     vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
2126 
2127     /* Selected guest interrupt file should be valid */
2128     if (virt && (!vgein || env->geilen < vgein)) {
2129         goto done;
2130     }
2131 
2132     /* Call machine specific IMSIC register emulation for TOPEI */
2133     ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
2134                     AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein,
2135                                   riscv_cpu_mxl_bits(env)),
2136                     val, new_val, wr_mask);
2137 
2138 done:
2139     if (ret) {
2140         return (env->virt_enabled && virt) ?
2141                RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
2142     }
2143     return RISCV_EXCP_NONE;
2144 }
2145 
2146 static RISCVException read_mtvec(CPURISCVState *env, int csrno,
2147                                  target_ulong *val)
2148 {
2149     *val = env->mtvec;
2150     return RISCV_EXCP_NONE;
2151 }
2152 
2153 static RISCVException write_mtvec(CPURISCVState *env, int csrno,
2154                                   target_ulong val)
2155 {
2156     /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
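    /* Example: writing 0x80000001 selects vectored mode with base 0x80000000. */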
2157     if ((val & 3) < 2) {
2158         env->mtvec = val;
2159     } else {
2160         qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n");
2161     }
2162     return RISCV_EXCP_NONE;
2163 }
2164 
2165 static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
2166                                          target_ulong *val)
2167 {
2168     *val = env->mcountinhibit;
2169     return RISCV_EXCP_NONE;
2170 }
2171 
2172 static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
2173                                           target_ulong val)
2174 {
2175     int cidx;
2176     PMUCTRState *counter;
2177     RISCVCPU *cpu = env_archcpu(env);
2178     uint32_t present_ctrs = cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR;
2179     target_ulong updated_ctrs = (env->mcountinhibit ^ val) & present_ctrs;
2180     uint64_t mhpmctr_val, prev_count, curr_count;
2181 
2182     /* WARL register - disable unavailable counters; TM bit is always 0 */
2183     env->mcountinhibit = val & present_ctrs;
2184 
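    /*
     * For each counter whose inhibit bit changed: when counting resumes,
     * snapshot the current underlying counter value so later reads start
     * from here; when counting is inhibited, fold the elapsed delta into
     * the stored value so reads stay frozen while inhibited.
     */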
2185     /* Update each counter whose inhibit bit changed and which monitors cycles or instructions */
2186     for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
2187         if (!(updated_ctrs & BIT(cidx)) ||
2188             (!riscv_pmu_ctr_monitor_cycles(env, cidx) &&
2189             !riscv_pmu_ctr_monitor_instructions(env, cidx))) {
2190             continue;
2191         }
2192 
2193         counter = &env->pmu_ctrs[cidx];
2194 
2195         if (!get_field(env->mcountinhibit, BIT(cidx))) {
2196             counter->mhpmcounter_prev =
2197                 riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);
2198             if (riscv_cpu_mxl(env) == MXL_RV32) {
2199                 counter->mhpmcounterh_prev =
2200                     riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);
2201             }
2202 
2203             if (cidx > 2) {
2204                 mhpmctr_val = counter->mhpmcounter_val;
2205                 if (riscv_cpu_mxl(env) == MXL_RV32) {
2206                     mhpmctr_val = mhpmctr_val |
2207                             ((uint64_t)counter->mhpmcounterh_val << 32);
2208                 }
2209                 riscv_pmu_setup_timer(env, mhpmctr_val, cidx);
2210             }
2211         } else {
2212             curr_count = riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);
2213 
2214             mhpmctr_val = counter->mhpmcounter_val;
2215             prev_count = counter->mhpmcounter_prev;
2216             if (riscv_cpu_mxl(env) == MXL_RV32) {
2217                 uint64_t tmp =
2218                     riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);
2219 
2220                 curr_count = curr_count | (tmp << 32);
2221                 mhpmctr_val = mhpmctr_val |
2222                     ((uint64_t)counter->mhpmcounterh_val << 32);
2223                 prev_count = prev_count |
2224                     ((uint64_t)counter->mhpmcounterh_prev << 32);
2225             }
2226 
2227             /* Adjust the counter for later reads. */
2228             mhpmctr_val = curr_count - prev_count + mhpmctr_val;
2229             counter->mhpmcounter_val = mhpmctr_val;
2230             if (riscv_cpu_mxl(env) == MXL_RV32) {
2231                 counter->mhpmcounterh_val = mhpmctr_val >> 32;
2232             }
2233         }
2234     }
2235 
2236     return RISCV_EXCP_NONE;
2237 }
2238 
2239 static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
2240                                       target_ulong *val)
2241 {
2242     *val = env->mcounteren;
2243     return RISCV_EXCP_NONE;
2244 }
2245 
2246 static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
2247                                        target_ulong val)
2248 {
2249     RISCVCPU *cpu = env_archcpu(env);
2250 
2251     /* WARL register - disable unavailable counters */
2252     env->mcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
2253                              COUNTEREN_IR);
2254     return RISCV_EXCP_NONE;
2255 }
2256 
2257 /* Machine Trap Handling */
2258 static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
2259                                          Int128 *val)
2260 {
2261     *val = int128_make128(env->mscratch, env->mscratchh);
2262     return RISCV_EXCP_NONE;
2263 }
2264 
2265 static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
2266                                           Int128 val)
2267 {
2268     env->mscratch = int128_getlo(val);
2269     env->mscratchh = int128_gethi(val);
2270     return RISCV_EXCP_NONE;
2271 }
2272 
2273 static RISCVException read_mscratch(CPURISCVState *env, int csrno,
2274                                     target_ulong *val)
2275 {
2276     *val = env->mscratch;
2277     return RISCV_EXCP_NONE;
2278 }
2279 
2280 static RISCVException write_mscratch(CPURISCVState *env, int csrno,
2281                                      target_ulong val)
2282 {
2283     env->mscratch = val;
2284     return RISCV_EXCP_NONE;
2285 }
2286 
2287 static RISCVException read_mepc(CPURISCVState *env, int csrno,
2288                                 target_ulong *val)
2289 {
2290     *val = env->mepc;
2291     return RISCV_EXCP_NONE;
2292 }
2293 
2294 static RISCVException write_mepc(CPURISCVState *env, int csrno,
2295                                  target_ulong val)
2296 {
2297     env->mepc = val;
2298     return RISCV_EXCP_NONE;
2299 }
2300 
2301 static RISCVException read_mcause(CPURISCVState *env, int csrno,
2302                                   target_ulong *val)
2303 {
2304     *val = env->mcause;
2305     return RISCV_EXCP_NONE;
2306 }
2307 
2308 static RISCVException write_mcause(CPURISCVState *env, int csrno,
2309                                    target_ulong val)
2310 {
2311     env->mcause = val;
2312     return RISCV_EXCP_NONE;
2313 }
2314 
2315 static RISCVException read_mtval(CPURISCVState *env, int csrno,
2316                                  target_ulong *val)
2317 {
2318     *val = env->mtval;
2319     return RISCV_EXCP_NONE;
2320 }
2321 
2322 static RISCVException write_mtval(CPURISCVState *env, int csrno,
2323                                   target_ulong val)
2324 {
2325     env->mtval = val;
2326     return RISCV_EXCP_NONE;
2327 }
2328 
2329 /* Execution environment configuration setup */
2330 static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
2331                                    target_ulong *val)
2332 {
2333     *val = env->menvcfg;
2334     return RISCV_EXCP_NONE;
2335 }
2336 
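/*
 * Only bits whose backing extension is present are writable in menvcfg:
 * e.g. STCE is writable only when sstc is enabled, PBMTE only with svpbmt,
 * and ADUE only with svadu; otherwise those bits stay 0.
 */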
2337 static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
2338                                     target_ulong val)
2339 {
2340     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
2341     uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE;
2342 
2343     if (riscv_cpu_mxl(env) == MXL_RV64) {
2344         mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
2345                 (cfg->ext_sstc ? MENVCFG_STCE : 0) |
2346                 (cfg->ext_svadu ? MENVCFG_ADUE : 0);
2347     }
2348     env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
2349 
2350     return RISCV_EXCP_NONE;
2351 }
2352 
2353 static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
2354                                     target_ulong *val)
2355 {
2356     *val = env->menvcfg >> 32;
2357     return RISCV_EXCP_NONE;
2358 }
2359 
2360 static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
2361                                      target_ulong val)
2362 {
2363     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
2364     uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
2365                     (cfg->ext_sstc ? MENVCFG_STCE : 0) |
2366                     (cfg->ext_svadu ? MENVCFG_ADUE : 0);
2367     uint64_t valh = (uint64_t)val << 32;
2368 
2369     env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
2370 
2371     return RISCV_EXCP_NONE;
2372 }
2373 
2374 static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
2375                                    target_ulong *val)
2376 {
2377     RISCVException ret;
2378 
2379     ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2380     if (ret != RISCV_EXCP_NONE) {
2381         return ret;
2382     }
2383 
2384     *val = env->senvcfg;
2385     return RISCV_EXCP_NONE;
2386 }
2387 
2388 static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
2389                                     target_ulong val)
2390 {
2391     uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
2392     RISCVException ret;
2393 
2394     ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2395     if (ret != RISCV_EXCP_NONE) {
2396         return ret;
2397     }
2398 
2399     env->senvcfg = (env->senvcfg & ~mask) | (val & mask);
2400     return RISCV_EXCP_NONE;
2401 }
2402 
2403 static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
2404                                    target_ulong *val)
2405 {
2406     RISCVException ret;
2407 
2408     ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2409     if (ret != RISCV_EXCP_NONE) {
2410         return ret;
2411     }
2412 
2413     /*
2414      * henvcfg.pbmte is read-only 0 when menvcfg.pbmte = 0
2415      * henvcfg.stce is read-only 0 when menvcfg.stce = 0
2416      * henvcfg.adue is read-only 0 when menvcfg.adue = 0
2417      */
2418     *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
2419                            env->menvcfg);
2420     return RISCV_EXCP_NONE;
2421 }
2422 
2423 static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
2424                                     target_ulong val)
2425 {
2426     uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;
2427     RISCVException ret;
2428 
2429     ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2430     if (ret != RISCV_EXCP_NONE) {
2431         return ret;
2432     }
2433 
2434     if (riscv_cpu_mxl(env) == MXL_RV64) {
2435         mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE);
2436     }
2437 
2438     env->henvcfg = (env->henvcfg & ~mask) | (val & mask);
2439 
2440     return RISCV_EXCP_NONE;
2441 }
2442 
2443 static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
2444                                     target_ulong *val)
2445 {
2446     RISCVException ret;
2447 
2448     ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2449     if (ret != RISCV_EXCP_NONE) {
2450         return ret;
2451     }
2452 
2453     *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
2454                             env->menvcfg)) >> 32;
2455     return RISCV_EXCP_NONE;
2456 }
2457 
2458 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
2459                                      target_ulong val)
2460 {
2461     uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE |
2462                                     HENVCFG_ADUE);
2463     uint64_t valh = (uint64_t)val << 32;
2464     RISCVException ret;
2465 
2466     ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2467     if (ret != RISCV_EXCP_NONE) {
2468         return ret;
2469     }
2470 
2471     env->henvcfg = (env->henvcfg & ~mask) | (valh & mask);
2472     return RISCV_EXCP_NONE;
2473 }
2474 
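/*
 * Smstateen hierarchy: mstateen gates what is visible/writable in
 * hstateen and sstateen, and hstateen additionally gates sstateen when
 * V=1, so the helpers below mask each lower-level register with its
 * parent's bits.
 */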
2475 static RISCVException read_mstateen(CPURISCVState *env, int csrno,
2476                                     target_ulong *val)
2477 {
2478     *val = env->mstateen[csrno - CSR_MSTATEEN0];
2479 
2480     return RISCV_EXCP_NONE;
2481 }
2482 
2483 static RISCVException write_mstateen(CPURISCVState *env, int csrno,
2484                                      uint64_t wr_mask, target_ulong new_val)
2485 {
2486     uint64_t *reg;
2487 
2488     reg = &env->mstateen[csrno - CSR_MSTATEEN0];
2489     *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2490 
2491     return RISCV_EXCP_NONE;
2492 }
2493 
2494 static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
2495                                       target_ulong new_val)
2496 {
2497     uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2498     if (!riscv_has_ext(env, RVF)) {
2499         wr_mask |= SMSTATEEN0_FCSR;
2500     }
2501 
2502     if (env->priv_ver >= PRIV_VERSION_1_13_0) {
2503         wr_mask |= SMSTATEEN0_P1P13;
2504     }
2505 
2506     return write_mstateen(env, csrno, wr_mask, new_val);
2507 }
2508 
2509 static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno,
2510                                          target_ulong new_val)
2511 {
2512     return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2513 }
2514 
2515 static RISCVException read_mstateenh(CPURISCVState *env, int csrno,
2516                                      target_ulong *val)
2517 {
2518     *val = env->mstateen[csrno - CSR_MSTATEEN0H] >> 32;
2519 
2520     return RISCV_EXCP_NONE;
2521 }
2522 
2523 static RISCVException write_mstateenh(CPURISCVState *env, int csrno,
2524                                       uint64_t wr_mask, target_ulong new_val)
2525 {
2526     uint64_t *reg, val;
2527 
2528     reg = &env->mstateen[csrno - CSR_MSTATEEN0H];
2529     val = (uint64_t)new_val << 32;
2530     val |= *reg & 0xFFFFFFFF;
2531     *reg = (*reg & ~wr_mask) | (val & wr_mask);
2532 
2533     return RISCV_EXCP_NONE;
2534 }
2535 
2536 static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
2537                                        target_ulong new_val)
2538 {
2539     uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2540 
2541     if (env->priv_ver >= PRIV_VERSION_1_13_0) {
2542         wr_mask |= SMSTATEEN0_P1P13;
2543     }
2544 
2545     return write_mstateenh(env, csrno, wr_mask, new_val);
2546 }
2547 
2548 static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno,
2549                                           target_ulong new_val)
2550 {
2551     return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
2552 }
2553 
2554 static RISCVException read_hstateen(CPURISCVState *env, int csrno,
2555                                     target_ulong *val)
2556 {
2557     int index = csrno - CSR_HSTATEEN0;
2558 
2559     *val = env->hstateen[index] & env->mstateen[index];
2560 
2561     return RISCV_EXCP_NONE;
2562 }
2563 
2564 static RISCVException write_hstateen(CPURISCVState *env, int csrno,
2565                                      uint64_t mask, target_ulong new_val)
2566 {
2567     int index = csrno - CSR_HSTATEEN0;
2568     uint64_t *reg, wr_mask;
2569 
2570     reg = &env->hstateen[index];
2571     wr_mask = env->mstateen[index] & mask;
2572     *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2573 
2574     return RISCV_EXCP_NONE;
2575 }
2576 
2577 static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
2578                                       target_ulong new_val)
2579 {
2580     uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2581 
2582     if (!riscv_has_ext(env, RVF)) {
2583         wr_mask |= SMSTATEEN0_FCSR;
2584     }
2585 
2586     return write_hstateen(env, csrno, wr_mask, new_val);
2587 }
2588 
2589 static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno,
2590                                          target_ulong new_val)
2591 {
2592     return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2593 }
2594 
2595 static RISCVException read_hstateenh(CPURISCVState *env, int csrno,
2596                                      target_ulong *val)
2597 {
2598     int index = csrno - CSR_HSTATEEN0H;
2599 
2600     *val = (env->hstateen[index] >> 32) & (env->mstateen[index] >> 32);
2601 
2602     return RISCV_EXCP_NONE;
2603 }
2604 
2605 static RISCVException write_hstateenh(CPURISCVState *env, int csrno,
2606                                       uint64_t mask, target_ulong new_val)
2607 {
2608     int index = csrno - CSR_HSTATEEN0H;
2609     uint64_t *reg, wr_mask, val;
2610 
2611     reg = &env->hstateen[index];
2612     val = (uint64_t)new_val << 32;
2613     val |= *reg & 0xFFFFFFFF;
2614     wr_mask = env->mstateen[index] & mask;
2615     *reg = (*reg & ~wr_mask) | (val & wr_mask);
2616 
2617     return RISCV_EXCP_NONE;
2618 }
2619 
2620 static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
2621                                        target_ulong new_val)
2622 {
2623     uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2624 
2625     return write_hstateenh(env, csrno, wr_mask, new_val);
2626 }
2627 
2628 static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno,
2629                                           target_ulong new_val)
2630 {
2631     return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
2632 }
2633 
2634 static RISCVException read_sstateen(CPURISCVState *env, int csrno,
2635                                     target_ulong *val)
2636 {
2637     bool virt = env->virt_enabled;
2638     int index = csrno - CSR_SSTATEEN0;
2639 
2640     *val = env->sstateen[index] & env->mstateen[index];
2641     if (virt) {
2642         *val &= env->hstateen[index];
2643     }
2644 
2645     return RISCV_EXCP_NONE;
2646 }
2647 
2648 static RISCVException write_sstateen(CPURISCVState *env, int csrno,
2649                                      uint64_t mask, target_ulong new_val)
2650 {
2651     bool virt = env->virt_enabled;
2652     int index = csrno - CSR_SSTATEEN0;
2653     uint64_t wr_mask;
2654     uint64_t *reg;
2655 
2656     wr_mask = env->mstateen[index] & mask;
2657     if (virt) {
2658         wr_mask &= env->hstateen[index];
2659     }
2660 
2661     reg = &env->sstateen[index];
2662     *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2663 
2664     return RISCV_EXCP_NONE;
2665 }
2666 
2667 static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
2668                                       target_ulong new_val)
2669 {
2670     uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2671 
2672     if (!riscv_has_ext(env, RVF)) {
2673         wr_mask |= SMSTATEEN0_FCSR;
2674     }
2675 
2676     return write_sstateen(env, csrno, wr_mask, new_val);
2677 }
2678 
2679 static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno,
2680                                       target_ulong new_val)
2681 {
2682     return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2683 }
2684 
2685 static RISCVException rmw_mip64(CPURISCVState *env, int csrno,
2686                                 uint64_t *ret_val,
2687                                 uint64_t new_val, uint64_t wr_mask)
2688 {
2689     uint64_t old_mip, mask = wr_mask & delegable_ints;
2690     uint32_t gin;
2691 
2692     if (mask & MIP_SEIP) {
2693         env->software_seip = new_val & MIP_SEIP;
2694         new_val |= env->external_seip * MIP_SEIP;
2695     }
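    /*
     * mip.SEIP is effectively the OR of the software-written bit saved
     * above and the externally driven level, so clearing SEIP here cannot
     * hide an interrupt that the external source still asserts.
     */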
2696 
2697     if (riscv_cpu_cfg(env)->ext_sstc && (env->priv == PRV_M) &&
2698         get_field(env->menvcfg, MENVCFG_STCE)) {
2699         /* The sstc extension forbids STIP & VSTIP from being writable in mip */
2700         mask = mask & ~(MIP_STIP | MIP_VSTIP);
2701     }
2702 
2703     if (mask) {
2704         old_mip = riscv_cpu_update_mip(env, mask, (new_val & mask));
2705     } else {
2706         old_mip = env->mip;
2707     }
2708 
2709     if (csrno != CSR_HVIP) {
2710         gin = get_field(env->hstatus, HSTATUS_VGEIN);
2711         old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0;
2712         old_mip |= env->vstime_irq ? MIP_VSTIP : 0;
2713     }
2714 
2715     if (ret_val) {
2716         *ret_val = old_mip;
2717     }
2718 
2719     return RISCV_EXCP_NONE;
2720 }
2721 
2722 static RISCVException rmw_mip(CPURISCVState *env, int csrno,
2723                               target_ulong *ret_val,
2724                               target_ulong new_val, target_ulong wr_mask)
2725 {
2726     uint64_t rval;
2727     RISCVException ret;
2728 
2729     ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask);
2730     if (ret_val) {
2731         *ret_val = rval;
2732     }
2733 
2734     return ret;
2735 }
2736 
2737 static RISCVException rmw_miph(CPURISCVState *env, int csrno,
2738                                target_ulong *ret_val,
2739                                target_ulong new_val, target_ulong wr_mask)
2740 {
2741     uint64_t rval;
2742     RISCVException ret;
2743 
2744     ret = rmw_mip64(env, csrno, &rval,
2745         ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2746     if (ret_val) {
2747         *ret_val = rval >> 32;
2748     }
2749 
2750     return ret;
2751 }
2752 
2753 /*
2754  * This function is written for two use cases:
2755  * 1- To access the mvip CSR as-is for M-mode accesses.
2756  * 2- To access sip as a combination of mip and mvip for S-mode accesses.
2757  *
2758  * Both report bits 1, 5, 9 and 13:63, with the exception of
2759  * STIP, which is read-only zero in mvip when the sstc extension
2760  * is present.
2761  * Also, sip needs to be read-only zero when both mideleg[i] and
2762  * mvien[i] are zero, but mvip needs to be an alias of mip.
2763  */
2764 static RISCVException rmw_mvip64(CPURISCVState *env, int csrno,
2765                                 uint64_t *ret_val,
2766                                 uint64_t new_val, uint64_t wr_mask)
2767 {
2768     RISCVCPU *cpu = env_archcpu(env);
2769     target_ulong ret_mip = 0;
2770     RISCVException ret;
2771     uint64_t old_mvip;
2772 
2773     /*
2774      * mideleg[i]  mvien[i]
2775      *   0           0      No delegation. mvip[i] is alias of mip[i].
2776      *   0           1      mvip[i] becomes source of interrupt, mip bypassed.
2777      *   1           X      mip[i] is source of interrupt and mvip[i] aliases
2778      *                      mip[i].
2779      *
2780      *   So alias condition would be for bits:
2781      *      ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (mideleg | ~mvien)) |
2782      *          (!sstc & MIP_STIP)
2783      *
2784      *   Non-alias condition will be for bits:
2785      *      (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (~mideleg & mvien)
2786      *
2787      *  alias_mask denotes the bits that come from mip; nalias_mask denotes
2788      *  the bits that come from mvip.
2789      */
2790     uint64_t alias_mask = ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
2791         (env->mideleg | ~env->mvien)) | MIP_STIP;
2792     uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
2793         (~env->mideleg & env->mvien);
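    /*
     * Illustrative example: with mideleg.SSIP = 0 and mvien.SSIP = 1,
     * SSIP falls in nalias_mask, so writes land in env->mvip below instead
     * of being forwarded to mip.
     */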
2794     uint64_t wr_mask_mvip;
2795     uint64_t wr_mask_mip;
2796 
2797     /*
2798      * mideleg[i]  mvien[i]
2799      *   0           0      sip[i] read-only zero.
2800      *   0           1      sip[i] alias of mvip[i].
2801      *   1           X      sip[i] alias of mip[i].
2802      *
2803      *  Both the alias and non-alias masks remain the same for sip except for
2804      *  bits which are zero in both mideleg and mvien.
2805      */
2806     if (csrno == CSR_SIP) {
2807         /* Remove bits that are zero in both mideleg and mvien. */
2808         alias_mask &= (env->mideleg | env->mvien);
2809         nalias_mask &= (env->mideleg | env->mvien);
2810     }
2811 
2812     /*
2813      * If sstc is present, mvip.STIP is not an alias of mip.STIP, so clear
2814      * that bit in the value returned from mip.
2815      */
2816     if (cpu->cfg.ext_sstc && (env->priv == PRV_M) &&
2817         get_field(env->menvcfg, MENVCFG_STCE)) {
2818         alias_mask &= ~MIP_STIP;
2819     }
2820 
2821     wr_mask_mip = wr_mask & alias_mask & mvip_writable_mask;
2822     wr_mask_mvip = wr_mask & nalias_mask & mvip_writable_mask;
2823 
2824     /*
2825      * For bits set in alias_mask, mvip needs to be alias of mip, so forward
2826      * this to rmw_mip.
2827      */
2828     ret = rmw_mip(env, CSR_MIP, &ret_mip, new_val, wr_mask_mip);
2829     if (ret != RISCV_EXCP_NONE) {
2830         return ret;
2831     }
2832 
2833     old_mvip = env->mvip;
2834 
2835     /*
2836      * Write to mvip. Update only non-alias bits. Alias bits were updated
2837      * in mip in rmw_mip above.
2838      */
2839     if (wr_mask_mvip) {
2840         env->mvip = (env->mvip & ~wr_mask_mvip) | (new_val & wr_mask_mvip);
2841 
2842         /*
2843          * Given that mvip is a separate source from mip, we need to trigger the
2844          * interrupt from here separately. Normally this happens from
2845          * riscv_cpu_update_mip.
2845          */
2846         riscv_cpu_interrupt(env);
2847     }
2848 
2849     if (ret_val) {
2850         ret_mip &= alias_mask;
2851         old_mvip &= nalias_mask;
2852 
2853         *ret_val = old_mvip | ret_mip;
2854     }
2855 
2856     return RISCV_EXCP_NONE;
2857 }
2858 
2859 static RISCVException rmw_mvip(CPURISCVState *env, int csrno,
2860                               target_ulong *ret_val,
2861                               target_ulong new_val, target_ulong wr_mask)
2862 {
2863     uint64_t rval;
2864     RISCVException ret;
2865 
2866     ret = rmw_mvip64(env, csrno, &rval, new_val, wr_mask);
2867     if (ret_val) {
2868         *ret_val = rval;
2869     }
2870 
2871     return ret;
2872 }
2873 
2874 static RISCVException rmw_mviph(CPURISCVState *env, int csrno,
2875                                target_ulong *ret_val,
2876                                target_ulong new_val, target_ulong wr_mask)
2877 {
2878     uint64_t rval;
2879     RISCVException ret;
2880 
2881     ret = rmw_mvip64(env, csrno, &rval,
2882         ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2883     if (ret_val) {
2884         *ret_val = rval >> 32;
2885     }
2886 
2887     return ret;
2888 }
2889 
2890 /* Supervisor Trap Setup */
2891 static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
2892                                         Int128 *val)
2893 {
2894     uint64_t mask = sstatus_v1_10_mask;
2895     uint64_t sstatus = env->mstatus & mask;
2896     if (env->xl != MXL_RV32 || env->debugger) {
2897         mask |= SSTATUS64_UXL;
2898     }
2899 
2900     *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
2901     return RISCV_EXCP_NONE;
2902 }
2903 
2904 static RISCVException read_sstatus(CPURISCVState *env, int csrno,
2905                                    target_ulong *val)
2906 {
2907     target_ulong mask = (sstatus_v1_10_mask);
2908     if (env->xl != MXL_RV32 || env->debugger) {
2909         mask |= SSTATUS64_UXL;
2910     }
2911     /* TODO: Use SXL not MXL. */
2912     *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
2913     return RISCV_EXCP_NONE;
2914 }
2915 
2916 static RISCVException write_sstatus(CPURISCVState *env, int csrno,
2917                                     target_ulong val)
2918 {
2919     target_ulong mask = (sstatus_v1_10_mask);
2920 
2921     if (env->xl != MXL_RV32 || env->debugger) {
2922         if ((val & SSTATUS64_UXL) != 0) {
2923             mask |= SSTATUS64_UXL;
2924         }
2925     }
2926     target_ulong newval = (env->mstatus & ~mask) | (val & mask);
2927     return write_mstatus(env, CSR_MSTATUS, newval);
2928 }
2929 
2930 static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
2931                                  uint64_t *ret_val,
2932                                  uint64_t new_val, uint64_t wr_mask)
2933 {
2934     uint64_t alias_mask = (LOCAL_INTERRUPTS | VS_MODE_INTERRUPTS) &
2935                             env->hideleg;
2936     uint64_t nalias_mask = LOCAL_INTERRUPTS & (~env->hideleg & env->hvien);
2937     uint64_t rval, rval_vs, vsbits;
2938     uint64_t wr_mask_vsie;
2939     uint64_t wr_mask_mie;
2940     RISCVException ret;
2941 
2942     /* Bring VS-level bits to correct position */
2943     vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
2944     new_val &= ~(VS_MODE_INTERRUPTS >> 1);
2945     new_val |= vsbits << 1;
2946 
2947     vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
2948     wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
2949     wr_mask |= vsbits << 1;
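    /*
     * Example: the guest sees SSIP at bit 1 of vsie, which corresponds to
     * VSSIP (bit 2) in the real mie/hideleg layout, hence the shift by one.
     */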
2950 
2951     wr_mask_mie = wr_mask & alias_mask;
2952     wr_mask_vsie = wr_mask & nalias_mask;
2953 
2954     ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask_mie);
2955 
2956     rval_vs = env->vsie & nalias_mask;
2957     env->vsie = (env->vsie & ~wr_mask_vsie) | (new_val & wr_mask_vsie);
2958 
2959     if (ret_val) {
2960         rval &= alias_mask;
2961         vsbits = rval & VS_MODE_INTERRUPTS;
2962         rval &= ~VS_MODE_INTERRUPTS;
2963         *ret_val = rval | (vsbits >> 1) | rval_vs;
2964     }
2965 
2966     return ret;
2967 }
2968 
2969 static RISCVException rmw_vsie(CPURISCVState *env, int csrno,
2970                                target_ulong *ret_val,
2971                                target_ulong new_val, target_ulong wr_mask)
2972 {
2973     uint64_t rval;
2974     RISCVException ret;
2975 
2976     ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask);
2977     if (ret_val) {
2978         *ret_val = rval;
2979     }
2980 
2981     return ret;
2982 }
2983 
2984 static RISCVException rmw_vsieh(CPURISCVState *env, int csrno,
2985                                 target_ulong *ret_val,
2986                                 target_ulong new_val, target_ulong wr_mask)
2987 {
2988     uint64_t rval;
2989     RISCVException ret;
2990 
2991     ret = rmw_vsie64(env, csrno, &rval,
2992         ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2993     if (ret_val) {
2994         *ret_val = rval >> 32;
2995     }
2996 
2997     return ret;
2998 }
2999 
3000 static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
3001                                 uint64_t *ret_val,
3002                                 uint64_t new_val, uint64_t wr_mask)
3003 {
3004     uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
3005         (~env->mideleg & env->mvien);
3006     uint64_t alias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & env->mideleg;
3007     uint64_t sie_mask = wr_mask & nalias_mask;
3008     RISCVException ret;
3009 
3010     /*
3011      * mideleg[i]  mvien[i]
3012      *   0           0      sie[i] read-only zero.
3013      *   0           1      sie[i] is a separate writable bit.
3014      *   1           X      sie[i] alias of mie[i].
3015      *
3016      *  Both the alias and non-alias masks remain the same for sie except for
3017      *  bits which are zero in both mideleg and mvien.
3018      */
3019     if (env->virt_enabled) {
3020         if (env->hvictl & HVICTL_VTI) {
3021             return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
3022         }
3023         ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
3024         if (ret_val) {
3025             *ret_val &= alias_mask;
3026         }
3027     } else {
3028         ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & alias_mask);
3029         if (ret_val) {
3030             *ret_val &= alias_mask;
3031             *ret_val |= env->sie & nalias_mask;
3032         }
3033 
3034         env->sie = (env->sie & ~sie_mask) | (new_val & sie_mask);
3035     }
3036 
3037     return ret;
3038 }
3039 
rmw_sie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3040 static RISCVException rmw_sie(CPURISCVState *env, int csrno,
3041                               target_ulong *ret_val,
3042                               target_ulong new_val, target_ulong wr_mask)
3043 {
3044     uint64_t rval;
3045     RISCVException ret;
3046 
3047     ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask);
3048     if (ret == RISCV_EXCP_NONE && ret_val) {
3049         *ret_val = rval;
3050     }
3051 
3052     return ret;
3053 }
3054 
rmw_sieh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3055 static RISCVException rmw_sieh(CPURISCVState *env, int csrno,
3056                                target_ulong *ret_val,
3057                                target_ulong new_val, target_ulong wr_mask)
3058 {
3059     uint64_t rval;
3060     RISCVException ret;
3061 
3062     ret = rmw_sie64(env, csrno, &rval,
3063         ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3064     if (ret_val) {
3065         *ret_val = rval >> 32;
3066     }
3067 
3068     return ret;
3069 }
3070 
read_stvec(CPURISCVState * env,int csrno,target_ulong * val)3071 static RISCVException read_stvec(CPURISCVState *env, int csrno,
3072                                  target_ulong *val)
3073 {
3074     *val = env->stvec;
3075     return RISCV_EXCP_NONE;
3076 }
3077 
write_stvec(CPURISCVState * env,int csrno,target_ulong val)3078 static RISCVException write_stvec(CPURISCVState *env, int csrno,
3079                                   target_ulong val)
3080 {
3081     /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
3082     /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
3083         env->stvec = val;
3084     } else {
3085         qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n");
3086     }
3087     return RISCV_EXCP_NONE;
3088 }
3089 
read_scounteren(CPURISCVState * env,int csrno,target_ulong * val)3090 static RISCVException read_scounteren(CPURISCVState *env, int csrno,
3091                                       target_ulong *val)
3092 {
3093     *val = env->scounteren;
3094     return RISCV_EXCP_NONE;
3095 }
3096 
write_scounteren(CPURISCVState * env,int csrno,target_ulong val)3097 static RISCVException write_scounteren(CPURISCVState *env, int csrno,
3098                                        target_ulong val)
3099 {
3100     RISCVCPU *cpu = env_archcpu(env);
3101 
3102     /* WARL register - disable unavailable counters */
3103     env->scounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
3104                              COUNTEREN_IR);
3105     return RISCV_EXCP_NONE;
3106 }
3107 
3108 /* Supervisor Trap Handling */
read_sscratch_i128(CPURISCVState * env,int csrno,Int128 * val)3109 static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
3110                                          Int128 *val)
3111 {
3112     *val = int128_make128(env->sscratch, env->sscratchh);
3113     return RISCV_EXCP_NONE;
3114 }
3115 
write_sscratch_i128(CPURISCVState * env,int csrno,Int128 val)3116 static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
3117                                           Int128 val)
3118 {
3119     env->sscratch = int128_getlo(val);
3120     env->sscratchh = int128_gethi(val);
3121     return RISCV_EXCP_NONE;
3122 }
3123 
read_sscratch(CPURISCVState * env,int csrno,target_ulong * val)3124 static RISCVException read_sscratch(CPURISCVState *env, int csrno,
3125                                     target_ulong *val)
3126 {
3127     *val = env->sscratch;
3128     return RISCV_EXCP_NONE;
3129 }
3130 
write_sscratch(CPURISCVState * env,int csrno,target_ulong val)3131 static RISCVException write_sscratch(CPURISCVState *env, int csrno,
3132                                      target_ulong val)
3133 {
3134     env->sscratch = val;
3135     return RISCV_EXCP_NONE;
3136 }
3137 
read_sepc(CPURISCVState * env,int csrno,target_ulong * val)3138 static RISCVException read_sepc(CPURISCVState *env, int csrno,
3139                                 target_ulong *val)
3140 {
3141     *val = env->sepc;
3142     return RISCV_EXCP_NONE;
3143 }
3144 
write_sepc(CPURISCVState * env,int csrno,target_ulong val)3145 static RISCVException write_sepc(CPURISCVState *env, int csrno,
3146                                  target_ulong val)
3147 {
3148     env->sepc = val;
3149     return RISCV_EXCP_NONE;
3150 }
3151 
read_scause(CPURISCVState * env,int csrno,target_ulong * val)3152 static RISCVException read_scause(CPURISCVState *env, int csrno,
3153                                   target_ulong *val)
3154 {
3155     *val = env->scause;
3156     return RISCV_EXCP_NONE;
3157 }
3158 
write_scause(CPURISCVState * env,int csrno,target_ulong val)3159 static RISCVException write_scause(CPURISCVState *env, int csrno,
3160                                    target_ulong val)
3161 {
3162     env->scause = val;
3163     return RISCV_EXCP_NONE;
3164 }
3165 
read_stval(CPURISCVState * env,int csrno,target_ulong * val)3166 static RISCVException read_stval(CPURISCVState *env, int csrno,
3167                                  target_ulong *val)
3168 {
3169     *val = env->stval;
3170     return RISCV_EXCP_NONE;
3171 }
3172 
write_stval(CPURISCVState * env,int csrno,target_ulong val)3173 static RISCVException write_stval(CPURISCVState *env, int csrno,
3174                                   target_ulong val)
3175 {
3176     env->stval = val;
3177     return RISCV_EXCP_NONE;
3178 }
3179 
3180 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
3181                                  uint64_t *ret_val,
3182                                  uint64_t new_val, uint64_t wr_mask);
3183 
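/*
 * vsip is accessed through hvip/mip: the guest sees the VS-level bits at
 * their S-level positions, so the value and write mask are shifted up by one
 * before the underlying rmw and the read result is shifted back down.  Only
 * bits delegated via hideleg or made visible via hvien are reflected; writes
 * are further limited to vsip_writable_mask.
 */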
rmw_vsip64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)3184 static RISCVException rmw_vsip64(CPURISCVState *env, int csrno,
3185                                  uint64_t *ret_val,
3186                                  uint64_t new_val, uint64_t wr_mask)
3187 {
3188     RISCVException ret;
3189     uint64_t rval, mask = env->hideleg & VS_MODE_INTERRUPTS;
3190     uint64_t vsbits;
3191 
3192     /* Add virtualized bits into vsip mask. */
3193     mask |= env->hvien & ~env->hideleg;
3194 
3195     /* Bring VS-level bits to correct position */
3196     vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
3197     new_val &= ~(VS_MODE_INTERRUPTS >> 1);
3198     new_val |= vsbits << 1;
3199     vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
3200     wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
3201     wr_mask |= vsbits << 1;
3202 
3203     ret = rmw_hvip64(env, csrno, &rval, new_val,
3204                      wr_mask & mask & vsip_writable_mask);
3205     if (ret_val) {
3206         rval &= mask;
3207         vsbits = rval & VS_MODE_INTERRUPTS;
3208         rval &= ~VS_MODE_INTERRUPTS;
3209         *ret_val = rval | (vsbits >> 1);
3210     }
3211 
3212     return ret;
3213 }
3214 
rmw_vsip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3215 static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
3216                                target_ulong *ret_val,
3217                                target_ulong new_val, target_ulong wr_mask)
3218 {
3219     uint64_t rval;
3220     RISCVException ret;
3221 
3222     ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask);
3223     if (ret_val) {
3224         *ret_val = rval;
3225     }
3226 
3227     return ret;
3228 }
3229 
rmw_vsiph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3230 static RISCVException rmw_vsiph(CPURISCVState *env, int csrno,
3231                                 target_ulong *ret_val,
3232                                 target_ulong new_val, target_ulong wr_mask)
3233 {
3234     uint64_t rval;
3235     RISCVException ret;
3236 
3237     ret = rmw_vsip64(env, csrno, &rval,
3238         ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3239     if (ret_val) {
3240         *ret_val = rval >> 32;
3241     }
3242 
3243     return ret;
3244 }
3245 
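/*
 * sip: in VS mode (with HVICTL.VTI clear) accesses are redirected to vsip;
 * otherwise they go through mvip/mip.  The value returned to the guest is
 * always limited to the bits delegated via mideleg or made visible via mvien.
 */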
rmw_sip64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)3246 static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
3247                                 uint64_t *ret_val,
3248                                 uint64_t new_val, uint64_t wr_mask)
3249 {
3250     RISCVException ret;
3251     uint64_t mask = (env->mideleg | env->mvien) & sip_writable_mask;
3252 
3253     if (env->virt_enabled) {
3254         if (env->hvictl & HVICTL_VTI) {
3255             return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
3256         }
3257         ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
3258     } else {
3259         ret = rmw_mvip64(env, csrno, ret_val, new_val, wr_mask & mask);
3260     }
3261 
3262     if (ret_val) {
3263         *ret_val &= (env->mideleg | env->mvien) &
3264             (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS);
3265     }
3266 
3267     return ret;
3268 }
3269 
rmw_sip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3270 static RISCVException rmw_sip(CPURISCVState *env, int csrno,
3271                               target_ulong *ret_val,
3272                               target_ulong new_val, target_ulong wr_mask)
3273 {
3274     uint64_t rval;
3275     RISCVException ret;
3276 
3277     ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask);
3278     if (ret_val) {
3279         *ret_val = rval;
3280     }
3281 
3282     return ret;
3283 }
3284 
rmw_siph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3285 static RISCVException rmw_siph(CPURISCVState *env, int csrno,
3286                                target_ulong *ret_val,
3287                                target_ulong new_val, target_ulong wr_mask)
3288 {
3289     uint64_t rval;
3290     RISCVException ret;
3291 
3292     ret = rmw_sip64(env, csrno, &rval,
3293         ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3294     if (ret_val) {
3295         *ret_val = rval >> 32;
3296     }
3297 
3298     return ret;
3299 }
3300 
3301 /* Supervisor Protection and Translation */
read_satp(CPURISCVState * env,int csrno,target_ulong * val)3302 static RISCVException read_satp(CPURISCVState *env, int csrno,
3303                                 target_ulong *val)
3304 {
3305     if (!riscv_cpu_cfg(env)->mmu) {
3306         *val = 0;
3307         return RISCV_EXCP_NONE;
3308     }
3309     *val = env->satp;
3310     return RISCV_EXCP_NONE;
3311 }
3312 
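/*
 * satp writes are ignored when no MMU is configured; otherwise the new value
 * is passed through legalize_xatp() so only legal encodings are stored.
 */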
write_satp(CPURISCVState * env,int csrno,target_ulong val)3313 static RISCVException write_satp(CPURISCVState *env, int csrno,
3314                                  target_ulong val)
3315 {
3316     if (!riscv_cpu_cfg(env)->mmu) {
3317         return RISCV_EXCP_NONE;
3318     }
3319 
3320     env->satp = legalize_xatp(env, env->satp, val);
3321     return RISCV_EXCP_NONE;
3322 }
3323 
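/*
 * vstopi reports the highest-priority interrupt pending for VS level.
 * Candidate sources are the VS external interrupt (from hgeip/IMSIC TOPEI
 * when a guest external interrupt file is selected, or synthesised from
 * hvictl) and any other interrupt reported by riscv_cpu_vsirq_pending().
 * The winner is encoded as IID in the upper bits and IPRIO in the low bits.
 */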
read_vstopi(CPURISCVState * env,int csrno,target_ulong * val)3324 static RISCVException read_vstopi(CPURISCVState *env, int csrno,
3325                                   target_ulong *val)
3326 {
3327     int irq, ret;
3328     target_ulong topei;
3329     uint64_t vseip, vsgein;
3330     uint32_t iid, iprio, hviid, hviprio, gein;
3331     uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS];
3332 
3333     gein = get_field(env->hstatus, HSTATUS_VGEIN);
3334     hviid = get_field(env->hvictl, HVICTL_IID);
3335     hviprio = get_field(env->hvictl, HVICTL_IPRIO);
3336 
3337     if (gein) {
3338         vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
3339         vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP;
3340         if (gein <= env->geilen && vseip) {
3341             siid[scount] = IRQ_S_EXT;
3342             siprio[scount] = IPRIO_MMAXIPRIO + 1;
3343             if (env->aia_ireg_rmw_fn[PRV_S]) {
3344                 /*
3345                  * Call machine specific IMSIC register emulation for
3346                  * reading TOPEI.
3347                  */
3348                 ret = env->aia_ireg_rmw_fn[PRV_S](
3349                         env->aia_ireg_rmw_fn_arg[PRV_S],
3350                         AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein,
3351                                       riscv_cpu_mxl_bits(env)),
3352                         &topei, 0, 0);
3353                 if (!ret && topei) {
3354                     siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK;
3355                 }
3356             }
3357             scount++;
3358         }
3359     } else {
3360         if (hviid == IRQ_S_EXT && hviprio) {
3361             siid[scount] = IRQ_S_EXT;
3362             siprio[scount] = hviprio;
3363             scount++;
3364         }
3365     }
3366 
3367     if (env->hvictl & HVICTL_VTI) {
3368         if (hviid != IRQ_S_EXT) {
3369             siid[scount] = hviid;
3370             siprio[scount] = hviprio;
3371             scount++;
3372         }
3373     } else {
3374         irq = riscv_cpu_vsirq_pending(env);
3375         if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) {
3376             siid[scount] = irq;
3377             siprio[scount] = env->hviprio[irq];
3378             scount++;
3379         }
3380     }
3381 
3382     iid = 0;
3383     iprio = UINT_MAX;
3384     for (s = 0; s < scount; s++) {
3385         if (siprio[s] < iprio) {
3386             iid = siid[s];
3387             iprio = siprio[s];
3388         }
3389     }
3390 
3391     if (iid) {
3392         if (env->hvictl & HVICTL_IPRIOM) {
3393             if (iprio > IPRIO_MMAXIPRIO) {
3394                 iprio = IPRIO_MMAXIPRIO;
3395             }
3396             if (!iprio) {
3397                 if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) {
3398                     iprio = IPRIO_MMAXIPRIO;
3399                 }
3400             }
3401         } else {
3402             iprio = 1;
3403         }
3404     } else {
3405         iprio = 0;
3406     }
3407 
3408     *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT;
3409     *val |= iprio;
3410 
3411     return RISCV_EXCP_NONE;
3412 }
3413 
read_stopi(CPURISCVState * env,int csrno,target_ulong * val)3414 static RISCVException read_stopi(CPURISCVState *env, int csrno,
3415                                  target_ulong *val)
3416 {
3417     int irq;
3418     uint8_t iprio;
3419 
3420     if (env->virt_enabled) {
3421         return read_vstopi(env, CSR_VSTOPI, val);
3422     }
3423 
3424     irq = riscv_cpu_sirq_pending(env);
3425     if (irq <= 0 || irq > 63) {
3426         *val = 0;
3427     } else {
3428         iprio = env->siprio[irq];
3429         if (!iprio) {
3430             if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) {
3431                 iprio = IPRIO_MMAXIPRIO;
3432            }
3433             }
3434         *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
3435         *val |= iprio;
3436     }
3437 
3438     return RISCV_EXCP_NONE;
3439 }
3440 
3441 /* Hypervisor Extensions */
read_hstatus(CPURISCVState * env,int csrno,target_ulong * val)3442 static RISCVException read_hstatus(CPURISCVState *env, int csrno,
3443                                    target_ulong *val)
3444 {
3445     *val = env->hstatus;
3446     if (riscv_cpu_mxl(env) != MXL_RV32) {
3447         /* We only support 64-bit VSXL */
3448         *val = set_field(*val, HSTATUS_VSXL, 2);
3449     }
3450     /* We only support little endian */
3451     *val = set_field(*val, HSTATUS_VSBE, 0);
3452     return RISCV_EXCP_NONE;
3453 }
3454 
write_hstatus(CPURISCVState * env,int csrno,target_ulong val)3455 static RISCVException write_hstatus(CPURISCVState *env, int csrno,
3456                                     target_ulong val)
3457 {
3458     env->hstatus = val;
3459     if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
3460         qemu_log_mask(LOG_UNIMP,
3461                       "QEMU does not support mixed HSXLEN options.\n");
3462     }
3463     if (get_field(val, HSTATUS_VSBE) != 0) {
3464         qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests.\n");
3465     }
3466     return RISCV_EXCP_NONE;
3467 }
3468 
read_hedeleg(CPURISCVState * env,int csrno,target_ulong * val)3469 static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
3470                                    target_ulong *val)
3471 {
3472     *val = env->hedeleg;
3473     return RISCV_EXCP_NONE;
3474 }
3475 
write_hedeleg(CPURISCVState * env,int csrno,target_ulong val)3476 static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
3477                                     target_ulong val)
3478 {
3479     env->hedeleg = val & vs_delegable_excps;
3480     return RISCV_EXCP_NONE;
3481 }
3482 
read_hedelegh(CPURISCVState * env,int csrno,target_ulong * val)3483 static RISCVException read_hedelegh(CPURISCVState *env, int csrno,
3484                                    target_ulong *val)
3485 {
3486     RISCVException ret;
3487     ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
3488     if (ret != RISCV_EXCP_NONE) {
3489         return ret;
3490     }
3491 
3492     /* Reserved; reads as zero for now */
3493     *val = 0;
3494     return RISCV_EXCP_NONE;
3495 }
3496 
write_hedelegh(CPURISCVState * env,int csrno,target_ulong val)3497 static RISCVException write_hedelegh(CPURISCVState *env, int csrno,
3498                                     target_ulong val)
3499 {
3500     RISCVException ret;
3501     ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
3502     if (ret != RISCV_EXCP_NONE) {
3503         return ret;
3504     }
3505 
3506     /* Reserved; writes are ignored for now */
3507     return RISCV_EXCP_NONE;
3508 }
3509 
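/*
 * hvien selects which non-delegated interrupts the hypervisor exposes to VS
 * level (they are then injected through hvip, see rmw_hvip64() below).  Only
 * the bits in hvien_writable_mask can be changed.
 */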
rmw_hvien64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)3510 static RISCVException rmw_hvien64(CPURISCVState *env, int csrno,
3511                                     uint64_t *ret_val,
3512                                     uint64_t new_val, uint64_t wr_mask)
3513 {
3514     uint64_t mask = wr_mask & hvien_writable_mask;
3515 
3516     if (ret_val) {
3517         *ret_val = env->hvien;
3518     }
3519 
3520     env->hvien = (env->hvien & ~mask) | (new_val & mask);
3521 
3522     return RISCV_EXCP_NONE;
3523 }
3524 
rmw_hvien(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3525 static RISCVException rmw_hvien(CPURISCVState *env, int csrno,
3526                                target_ulong *ret_val,
3527                                target_ulong new_val, target_ulong wr_mask)
3528 {
3529     uint64_t rval;
3530     RISCVException ret;
3531 
3532     ret = rmw_hvien64(env, csrno, &rval, new_val, wr_mask);
3533     if (ret_val) {
3534         *ret_val = rval;
3535     }
3536 
3537     return ret;
3538 }
3539 
rmw_hvienh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3540 static RISCVException rmw_hvienh(CPURISCVState *env, int csrno,
3541                                    target_ulong *ret_val,
3542                                    target_ulong new_val, target_ulong wr_mask)
3543 {
3544     uint64_t rval;
3545     RISCVException ret;
3546 
3547     ret = rmw_hvien64(env, csrno, &rval,
3548         ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3549     if (ret_val) {
3550         *ret_val = rval >> 32;
3551     }
3552 
3553     return ret;
3554 }
3555 
rmw_hideleg64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)3556 static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno,
3557                                     uint64_t *ret_val,
3558                                     uint64_t new_val, uint64_t wr_mask)
3559 {
3560     uint64_t mask = wr_mask & vs_delegable_ints;
3561 
3562     if (ret_val) {
3563         *ret_val = env->hideleg & vs_delegable_ints;
3564     }
3565 
3566     env->hideleg = (env->hideleg & ~mask) | (new_val & mask);
3567     return RISCV_EXCP_NONE;
3568 }
3569 
rmw_hideleg(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3570 static RISCVException rmw_hideleg(CPURISCVState *env, int csrno,
3571                                   target_ulong *ret_val,
3572                                   target_ulong new_val, target_ulong wr_mask)
3573 {
3574     uint64_t rval;
3575     RISCVException ret;
3576 
3577     ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask);
3578     if (ret_val) {
3579         *ret_val = rval;
3580     }
3581 
3582     return ret;
3583 }
3584 
rmw_hidelegh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3585 static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno,
3586                                    target_ulong *ret_val,
3587                                    target_ulong new_val, target_ulong wr_mask)
3588 {
3589     uint64_t rval;
3590     RISCVException ret;
3591 
3592     ret = rmw_hideleg64(env, csrno, &rval,
3593         ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3594     if (ret_val) {
3595         *ret_val = rval >> 32;
3596     }
3597 
3598     return ret;
3599 }
3600 
3601 /*
3602  * This function handles two use-cases:
3603  * 1. Accessing the hvip CSR as-is from HS-mode.
3604  * 2. Accessing vsip as a combination of hvip and mip from VS-mode.
3605  *
3606  * Both report bits 2, 6, 10 and 13:63.
3607  * vsip needs to be read-only zero when both hideleg[i] and
3608  * hvien[i] are zero.
3609  */
rmw_hvip64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)3610 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
3611                                  uint64_t *ret_val,
3612                                  uint64_t new_val, uint64_t wr_mask)
3613 {
3614     RISCVException ret;
3615     uint64_t old_hvip;
3616     uint64_t ret_mip;
3617 
3618     /*
3619      * For bits 10, 6 and 2, vsip[i] is an alias of hip[i]. These bits are
3620      * present in hip, hvip and mip, where mip[i] is an alias of hip[i] and
3621      * hvip[i] is OR'ed into hip[i] to inject virtual interrupts from the
3622      * hypervisor. These bits are actually maintained in mip, so we read them
3623      * from there. This gives a single source of truth and allows for an
3624      * easier implementation.
3625      *
3626      * For bits 13:63 we have:
3627      *
3628      * hideleg[i]  hvien[i]
3629      *   0           0      No delegation. vsip[i] readonly zero.
3630      *   0           1      vsip[i] is alias of hvip[i], sip bypassed.
3631      *   1           X      vsip[i] is alias of sip[i], hvip bypassed.
3632      *
3633      *  alias_mask denotes the bits that come from sip (mip here given we
3634      *  maintain all bits there). nalias_mask denotes bits that come from
3635      *  hvip.
3636      */
3637     uint64_t alias_mask = (env->hideleg | ~env->hvien) | VS_MODE_INTERRUPTS;
3638     uint64_t nalias_mask = (~env->hideleg & env->hvien);
3639     uint64_t wr_mask_hvip;
3640     uint64_t wr_mask_mip;
3641 
3642     /*
3643      * Both alias and non-alias mask remain same for vsip except:
3644      *  1- For VS* bits if they are zero in hideleg.
3645      *  2- For 13:63 bits if they are zero in both hideleg and hvien.
3646      */
3647     if (csrno == CSR_VSIP) {
3648         /* zero-out VS* bits that are not delegated to VS mode. */
3649         alias_mask &= (env->hideleg | ~VS_MODE_INTERRUPTS);
3650 
3651         /*
3652          * Zero out bits 13:63 that are zero in both hideleg and hvien.
3653          * nalias_mask cannot contain any VS* bits, so only the second
3654          * condition applies to it.
3655          */
3656         nalias_mask &= (env->hideleg | env->hvien);
3657         alias_mask &= (env->hideleg | env->hvien);
3658     }
3659 
3660     wr_mask_hvip = wr_mask & nalias_mask & hvip_writable_mask;
3661     wr_mask_mip = wr_mask & alias_mask & hvip_writable_mask;
3662 
3663     /* Aliased bits, bits 10, 6, 2 need to come from mip. */
3664     ret = rmw_mip64(env, csrno, &ret_mip, new_val, wr_mask_mip);
3665     if (ret != RISCV_EXCP_NONE) {
3666         return ret;
3667     }
3668 
3669     old_hvip = env->hvip;
3670 
3671     if (wr_mask_hvip) {
3672         env->hvip = (env->hvip & ~wr_mask_hvip) | (new_val & wr_mask_hvip);
3673 
3674         /*
3675          * Since hvip is a separate source from mip, we need to trigger the
3676          * interrupt here as well; normally this happens in riscv_cpu_update_mip.
3677          */
3678         riscv_cpu_interrupt(env);
3679     }
3680 
3681     if (ret_val) {
3682         /* Only take VS* bits from mip. */
3683         ret_mip &= alias_mask;
3684 
3685         /* Take in non-delegated 13:63 bits from hvip. */
3686         old_hvip &= nalias_mask;
3687 
3688         *ret_val = ret_mip | old_hvip;
3689     }
3690 
3691     return ret;
3692 }
3693 
rmw_hvip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3694 static RISCVException rmw_hvip(CPURISCVState *env, int csrno,
3695                                target_ulong *ret_val,
3696                                target_ulong new_val, target_ulong wr_mask)
3697 {
3698     uint64_t rval;
3699     RISCVException ret;
3700 
3701     ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask);
3702     if (ret_val) {
3703         *ret_val = rval;
3704     }
3705 
3706     return ret;
3707 }
3708 
rmw_hviph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3709 static RISCVException rmw_hviph(CPURISCVState *env, int csrno,
3710                                 target_ulong *ret_val,
3711                                 target_ulong new_val, target_ulong wr_mask)
3712 {
3713     uint64_t rval;
3714     RISCVException ret;
3715 
3716     ret = rmw_hvip64(env, csrno, &rval,
3717         ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3718     if (ret_val) {
3719         *ret_val = rval >> 32;
3720     }
3721 
3722     return ret;
3723 }
3724 
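/*
 * hip is an alias of mip restricted to the HS-level interrupt bits; writes
 * are additionally limited to hip_writable_mask.
 */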
rmw_hip(CPURISCVState * env,int csrno,target_ulong * ret_value,target_ulong new_value,target_ulong write_mask)3725 static RISCVException rmw_hip(CPURISCVState *env, int csrno,
3726                               target_ulong *ret_value,
3727                               target_ulong new_value, target_ulong write_mask)
3728 {
3729     int ret = rmw_mip(env, csrno, ret_value, new_value,
3730                       write_mask & hip_writable_mask);
3731 
3732     if (ret_value) {
3733         *ret_value &= HS_MODE_INTERRUPTS;
3734     }
3735     return ret;
3736 }
3737 
rmw_hie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3738 static RISCVException rmw_hie(CPURISCVState *env, int csrno,
3739                               target_ulong *ret_val,
3740                               target_ulong new_val, target_ulong wr_mask)
3741 {
3742     uint64_t rval;
3743     RISCVException ret;
3744 
3745     ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS);
3746     if (ret_val) {
3747         *ret_val = rval & HS_MODE_INTERRUPTS;
3748     }
3749 
3750     return ret;
3751 }
3752 
read_hcounteren(CPURISCVState * env,int csrno,target_ulong * val)3753 static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
3754                                       target_ulong *val)
3755 {
3756     *val = env->hcounteren;
3757     return RISCV_EXCP_NONE;
3758 }
3759 
write_hcounteren(CPURISCVState * env,int csrno,target_ulong val)3760 static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
3761                                        target_ulong val)
3762 {
3763     RISCVCPU *cpu = env_archcpu(env);
3764 
3765     /* WARL register - disable unavailable counters */
3766     env->hcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
3767                              COUNTEREN_IR);
3768     return RISCV_EXCP_NONE;
3769 }
3770 
read_hgeie(CPURISCVState * env,int csrno,target_ulong * val)3771 static RISCVException read_hgeie(CPURISCVState *env, int csrno,
3772                                  target_ulong *val)
3773 {
3774     if (val) {
3775         *val = env->hgeie;
3776     }
3777     return RISCV_EXCP_NONE;
3778 }
3779 
write_hgeie(CPURISCVState * env,int csrno,target_ulong val)3780 static RISCVException write_hgeie(CPURISCVState *env, int csrno,
3781                                   target_ulong val)
3782 {
3783     /* Only bits GEILEN:1 are implemented; bit 0 is hardwired to zero */
3784     val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
3785     env->hgeie = val;
3786     /* Update mip.SGEIP bit */
3787     riscv_cpu_update_mip(env, MIP_SGEIP,
3788                          BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
3789     return RISCV_EXCP_NONE;
3790 }
3791 
read_htval(CPURISCVState * env,int csrno,target_ulong * val)3792 static RISCVException read_htval(CPURISCVState *env, int csrno,
3793                                  target_ulong *val)
3794 {
3795     *val = env->htval;
3796     return RISCV_EXCP_NONE;
3797 }
3798 
write_htval(CPURISCVState * env,int csrno,target_ulong val)3799 static RISCVException write_htval(CPURISCVState *env, int csrno,
3800                                   target_ulong val)
3801 {
3802     env->htval = val;
3803     return RISCV_EXCP_NONE;
3804 }
3805 
read_htinst(CPURISCVState * env,int csrno,target_ulong * val)3806 static RISCVException read_htinst(CPURISCVState *env, int csrno,
3807                                   target_ulong *val)
3808 {
3809     *val = env->htinst;
3810     return RISCV_EXCP_NONE;
3811 }
3812 
write_htinst(CPURISCVState * env,int csrno,target_ulong val)3813 static RISCVException write_htinst(CPURISCVState *env, int csrno,
3814                                    target_ulong val)
3815 {
3816     return RISCV_EXCP_NONE;
3817 }
3818 
read_hgeip(CPURISCVState * env,int csrno,target_ulong * val)3819 static RISCVException read_hgeip(CPURISCVState *env, int csrno,
3820                                  target_ulong *val)
3821 {
3822     if (val) {
3823         *val = env->hgeip;
3824     }
3825     return RISCV_EXCP_NONE;
3826 }
3827 
read_hgatp(CPURISCVState * env,int csrno,target_ulong * val)3828 static RISCVException read_hgatp(CPURISCVState *env, int csrno,
3829                                  target_ulong *val)
3830 {
3831     *val = env->hgatp;
3832     return RISCV_EXCP_NONE;
3833 }
3834 
write_hgatp(CPURISCVState * env,int csrno,target_ulong val)3835 static RISCVException write_hgatp(CPURISCVState *env, int csrno,
3836                                   target_ulong val)
3837 {
3838     env->hgatp = legalize_xatp(env, env->hgatp, val);
3839     return RISCV_EXCP_NONE;
3840 }
3841 
read_htimedelta(CPURISCVState * env,int csrno,target_ulong * val)3842 static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
3843                                       target_ulong *val)
3844 {
3845     if (!env->rdtime_fn) {
3846         return RISCV_EXCP_ILLEGAL_INST;
3847     }
3848 
3849     *val = env->htimedelta;
3850     return RISCV_EXCP_NONE;
3851 }
3852 
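/*
 * On RV32 this only updates the low 32 bits of htimedelta (the high half is
 * written via htimedeltah).  With Sstc the VS-level timer (vstimecmp /
 * MIP_VSTIP) is reprogrammed to account for the new delta.
 */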
write_htimedelta(CPURISCVState * env,int csrno,target_ulong val)3853 static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
3854                                        target_ulong val)
3855 {
3856     if (!env->rdtime_fn) {
3857         return RISCV_EXCP_ILLEGAL_INST;
3858     }
3859 
3860     if (riscv_cpu_mxl(env) == MXL_RV32) {
3861         env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
3862     } else {
3863         env->htimedelta = val;
3864     }
3865 
3866     if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) {
3867         riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
3868                                   env->htimedelta, MIP_VSTIP);
3869     }
3870 
3871     return RISCV_EXCP_NONE;
3872 }
3873 
read_htimedeltah(CPURISCVState * env,int csrno,target_ulong * val)3874 static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
3875                                        target_ulong *val)
3876 {
3877     if (!env->rdtime_fn) {
3878         return RISCV_EXCP_ILLEGAL_INST;
3879     }
3880 
3881     *val = env->htimedelta >> 32;
3882     return RISCV_EXCP_NONE;
3883 }
3884 
write_htimedeltah(CPURISCVState * env,int csrno,target_ulong val)3885 static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
3886                                         target_ulong val)
3887 {
3888     if (!env->rdtime_fn) {
3889         return RISCV_EXCP_ILLEGAL_INST;
3890     }
3891 
3892     env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);
3893 
3894     if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) {
3895         riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
3896                                   env->htimedelta, MIP_VSTIP);
3897     }
3898 
3899     return RISCV_EXCP_NONE;
3900 }
3901 
read_hvictl(CPURISCVState * env,int csrno,target_ulong * val)3902 static RISCVException read_hvictl(CPURISCVState *env, int csrno,
3903                                   target_ulong *val)
3904 {
3905     *val = env->hvictl;
3906     return RISCV_EXCP_NONE;
3907 }
3908 
write_hvictl(CPURISCVState * env,int csrno,target_ulong val)3909 static RISCVException write_hvictl(CPURISCVState *env, int csrno,
3910                                    target_ulong val)
3911 {
3912     env->hvictl = val & HVICTL_VALID_MASK;
3913     return RISCV_EXCP_NONE;
3914 }
3915 
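/*
 * hviprio1/1h/2/2h pack one byte of priority per interrupt, four interrupts
 * per 32 bits of XLEN.  riscv_cpu_hviprio_index2irq() maps a byte index in
 * the register to the corresponding irq number and flags slots that read
 * as zero.
 */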
read_hvipriox(CPURISCVState * env,int first_index,uint8_t * iprio,target_ulong * val)3916 static RISCVException read_hvipriox(CPURISCVState *env, int first_index,
3917                          uint8_t *iprio, target_ulong *val)
3918 {
3919     int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
3920 
3921     /* First index has to be a multiple of number of irqs per register */
3922     /* First index has to be a multiple of the number of irqs per register */
3923         return (env->virt_enabled) ?
3924                RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
3925     }
3926 
3927     /* Fill-up return value */
3928     *val = 0;
3929     for (i = 0; i < num_irqs; i++) {
3930         if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
3931             continue;
3932         }
3933         if (rdzero) {
3934             continue;
3935         }
3936         *val |= ((target_ulong)iprio[irq]) << (i * 8);
3937     }
3938 
3939     return RISCV_EXCP_NONE;
3940 }
3941 
write_hvipriox(CPURISCVState * env,int first_index,uint8_t * iprio,target_ulong val)3942 static RISCVException write_hvipriox(CPURISCVState *env, int first_index,
3943                           uint8_t *iprio, target_ulong val)
3944 {
3945     int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
3946 
3947     /* First index has to be a multiple of number of irqs per register */
3948     /* First index has to be a multiple of the number of irqs per register */
3949         return (env->virt_enabled) ?
3950                RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
3951     }
3952 
3953     /* Fill-up priority array */
3954     for (i = 0; i < num_irqs; i++) {
3955         if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
3956             continue;
3957         }
3958         if (rdzero) {
3959             iprio[irq] = 0;
3960         } else {
3961             iprio[irq] = (val >> (i * 8)) & 0xff;
3962         }
3963     }
3964 
3965     return RISCV_EXCP_NONE;
3966 }
3967 
read_hviprio1(CPURISCVState * env,int csrno,target_ulong * val)3968 static RISCVException read_hviprio1(CPURISCVState *env, int csrno,
3969                                     target_ulong *val)
3970 {
3971     return read_hvipriox(env, 0, env->hviprio, val);
3972 }
3973 
write_hviprio1(CPURISCVState * env,int csrno,target_ulong val)3974 static RISCVException write_hviprio1(CPURISCVState *env, int csrno,
3975                                      target_ulong val)
3976 {
3977     return write_hvipriox(env, 0, env->hviprio, val);
3978 }
3979 
read_hviprio1h(CPURISCVState * env,int csrno,target_ulong * val)3980 static RISCVException read_hviprio1h(CPURISCVState *env, int csrno,
3981                                      target_ulong *val)
3982 {
3983     return read_hvipriox(env, 4, env->hviprio, val);
3984 }
3985 
write_hviprio1h(CPURISCVState * env,int csrno,target_ulong val)3986 static RISCVException write_hviprio1h(CPURISCVState *env, int csrno,
3987                                       target_ulong val)
3988 {
3989     return write_hvipriox(env, 4, env->hviprio, val);
3990 }
3991 
read_hviprio2(CPURISCVState * env,int csrno,target_ulong * val)3992 static RISCVException read_hviprio2(CPURISCVState *env, int csrno,
3993                                     target_ulong *val)
3994 {
3995     return read_hvipriox(env, 8, env->hviprio, val);
3996 }
3997 
write_hviprio2(CPURISCVState * env,int csrno,target_ulong val)3998 static RISCVException write_hviprio2(CPURISCVState *env, int csrno,
3999                                      target_ulong val)
4000 {
4001     return write_hvipriox(env, 8, env->hviprio, val);
4002 }
4003 
read_hviprio2h(CPURISCVState * env,int csrno,target_ulong * val)4004 static RISCVException read_hviprio2h(CPURISCVState *env, int csrno,
4005                                      target_ulong *val)
4006 {
4007     return read_hvipriox(env, 12, env->hviprio, val);
4008 }
4009 
write_hviprio2h(CPURISCVState * env,int csrno,target_ulong val)4010 static RISCVException write_hviprio2h(CPURISCVState *env, int csrno,
4011                                       target_ulong val)
4012 {
4013     return write_hvipriox(env, 12, env->hviprio, val);
4014 }
4015 
4016 /* Virtual CSR Registers */
read_vsstatus(CPURISCVState * env,int csrno,target_ulong * val)4017 static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
4018                                     target_ulong *val)
4019 {
4020     *val = env->vsstatus;
4021     return RISCV_EXCP_NONE;
4022 }
4023 
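/*
 * The UXL field of vsstatus is treated as WARL: if the value being written
 * has UXL == 0, the previous UXL is preserved; otherwise the written bits
 * replace the corresponding vsstatus bits.
 */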
write_vsstatus(CPURISCVState * env,int csrno,target_ulong val)4024 static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
4025                                      target_ulong val)
4026 {
4027     uint64_t mask = (target_ulong)-1;
4028     if ((val & VSSTATUS64_UXL) == 0) {
4029         mask &= ~VSSTATUS64_UXL;
4030     }
4031     env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
4032     return RISCV_EXCP_NONE;
4033 }
4034 
read_vstvec(CPURISCVState * env,int csrno,target_ulong * val)4035 static RISCVException read_vstvec(CPURISCVState *env, int csrno,
4036                                   target_ulong *val)
4037 {
4038     *val = env->vstvec;
4039     return RISCV_EXCP_NONE;
4040 }
4041 
write_vstvec(CPURISCVState * env,int csrno,target_ulong val)4042 static RISCVException write_vstvec(CPURISCVState *env, int csrno,
4043                                    target_ulong val)
4044 {
4045     /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
4046     if ((val & 3) < 2) {
4047         env->vstvec = val;
4048     } else {
4049         qemu_log_mask(LOG_UNIMP, "CSR_VSTVEC: reserved mode not supported\n");
4050     }
4051     return RISCV_EXCP_NONE;
4052 }
4053 
read_vsscratch(CPURISCVState * env,int csrno,target_ulong * val)4054 static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
4055                                      target_ulong *val)
4056 {
4057     *val = env->vsscratch;
4058     return RISCV_EXCP_NONE;
4059 }
4060 
write_vsscratch(CPURISCVState * env,int csrno,target_ulong val)4061 static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
4062                                       target_ulong val)
4063 {
4064     env->vsscratch = val;
4065     return RISCV_EXCP_NONE;
4066 }
4067 
read_vsepc(CPURISCVState * env,int csrno,target_ulong * val)4068 static RISCVException read_vsepc(CPURISCVState *env, int csrno,
4069                                  target_ulong *val)
4070 {
4071     *val = env->vsepc;
4072     return RISCV_EXCP_NONE;
4073 }
4074 
write_vsepc(CPURISCVState * env,int csrno,target_ulong val)4075 static RISCVException write_vsepc(CPURISCVState *env, int csrno,
4076                                   target_ulong val)
4077 {
4078     env->vsepc = val;
4079     return RISCV_EXCP_NONE;
4080 }
4081 
read_vscause(CPURISCVState * env,int csrno,target_ulong * val)4082 static RISCVException read_vscause(CPURISCVState *env, int csrno,
4083                                    target_ulong *val)
4084 {
4085     *val = env->vscause;
4086     return RISCV_EXCP_NONE;
4087 }
4088 
write_vscause(CPURISCVState * env,int csrno,target_ulong val)4089 static RISCVException write_vscause(CPURISCVState *env, int csrno,
4090                                     target_ulong val)
4091 {
4092     env->vscause = val;
4093     return RISCV_EXCP_NONE;
4094 }
4095 
read_vstval(CPURISCVState * env,int csrno,target_ulong * val)4096 static RISCVException read_vstval(CPURISCVState *env, int csrno,
4097                                   target_ulong *val)
4098 {
4099     *val = env->vstval;
4100     return RISCV_EXCP_NONE;
4101 }
4102 
write_vstval(CPURISCVState * env,int csrno,target_ulong val)4103 static RISCVException write_vstval(CPURISCVState *env, int csrno,
4104                                    target_ulong val)
4105 {
4106     env->vstval = val;
4107     return RISCV_EXCP_NONE;
4108 }
4109 
read_vsatp(CPURISCVState * env,int csrno,target_ulong * val)4110 static RISCVException read_vsatp(CPURISCVState *env, int csrno,
4111                                  target_ulong *val)
4112 {
4113     *val = env->vsatp;
4114     return RISCV_EXCP_NONE;
4115 }
4116 
write_vsatp(CPURISCVState * env,int csrno,target_ulong val)4117 static RISCVException write_vsatp(CPURISCVState *env, int csrno,
4118                                   target_ulong val)
4119 {
4120     env->vsatp = legalize_xatp(env, env->vsatp, val);
4121     return RISCV_EXCP_NONE;
4122 }
4123 
read_mtval2(CPURISCVState * env,int csrno,target_ulong * val)4124 static RISCVException read_mtval2(CPURISCVState *env, int csrno,
4125                                   target_ulong *val)
4126 {
4127     *val = env->mtval2;
4128     return RISCV_EXCP_NONE;
4129 }
4130 
write_mtval2(CPURISCVState * env,int csrno,target_ulong val)4131 static RISCVException write_mtval2(CPURISCVState *env, int csrno,
4132                                    target_ulong val)
4133 {
4134     env->mtval2 = val;
4135     return RISCV_EXCP_NONE;
4136 }
4137 
read_mtinst(CPURISCVState * env,int csrno,target_ulong * val)4138 static RISCVException read_mtinst(CPURISCVState *env, int csrno,
4139                                   target_ulong *val)
4140 {
4141     *val = env->mtinst;
4142     return RISCV_EXCP_NONE;
4143 }
4144 
write_mtinst(CPURISCVState * env,int csrno,target_ulong val)4145 static RISCVException write_mtinst(CPURISCVState *env, int csrno,
4146                                    target_ulong val)
4147 {
4148     env->mtinst = val;
4149     return RISCV_EXCP_NONE;
4150 }
4151 
4152 /* Physical Memory Protection */
read_mseccfg(CPURISCVState * env,int csrno,target_ulong * val)4153 static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
4154                                    target_ulong *val)
4155 {
4156     *val = mseccfg_csr_read(env);
4157     return RISCV_EXCP_NONE;
4158 }
4159 
write_mseccfg(CPURISCVState * env,int csrno,target_ulong val)4160 static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
4161                                     target_ulong val)
4162 {
4163     mseccfg_csr_write(env, val);
4164     return RISCV_EXCP_NONE;
4165 }
4166 
read_pmpcfg(CPURISCVState * env,int csrno,target_ulong * val)4167 static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
4168                                   target_ulong *val)
4169 {
4170     uint32_t reg_index = csrno - CSR_PMPCFG0;
4171 
4172     *val = pmpcfg_csr_read(env, reg_index);
4173     return RISCV_EXCP_NONE;
4174 }
4175 
write_pmpcfg(CPURISCVState * env,int csrno,target_ulong val)4176 static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
4177                                    target_ulong val)
4178 {
4179     uint32_t reg_index = csrno - CSR_PMPCFG0;
4180 
4181     pmpcfg_csr_write(env, reg_index, val);
4182     return RISCV_EXCP_NONE;
4183 }
4184 
read_pmpaddr(CPURISCVState * env,int csrno,target_ulong * val)4185 static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
4186                                    target_ulong *val)
4187 {
4188     *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
4189     return RISCV_EXCP_NONE;
4190 }
4191 
write_pmpaddr(CPURISCVState * env,int csrno,target_ulong val)4192 static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
4193                                     target_ulong val)
4194 {
4195     pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
4196     return RISCV_EXCP_NONE;
4197 }
4198 
read_tselect(CPURISCVState * env,int csrno,target_ulong * val)4199 static RISCVException read_tselect(CPURISCVState *env, int csrno,
4200                                    target_ulong *val)
4201 {
4202     *val = tselect_csr_read(env);
4203     return RISCV_EXCP_NONE;
4204 }
4205 
write_tselect(CPURISCVState * env,int csrno,target_ulong val)4206 static RISCVException write_tselect(CPURISCVState *env, int csrno,
4207                                     target_ulong val)
4208 {
4209     tselect_csr_write(env, val);
4210     return RISCV_EXCP_NONE;
4211 }
4212 
read_tdata(CPURISCVState * env,int csrno,target_ulong * val)4213 static RISCVException read_tdata(CPURISCVState *env, int csrno,
4214                                  target_ulong *val)
4215 {
4216     /* return 0 in tdata1 to end the trigger enumeration */
4217     if (env->trigger_cur >= RV_MAX_TRIGGERS && csrno == CSR_TDATA1) {
4218         *val = 0;
4219         return RISCV_EXCP_NONE;
4220     }
4221 
4222     if (!tdata_available(env, csrno - CSR_TDATA1)) {
4223         return RISCV_EXCP_ILLEGAL_INST;
4224     }
4225 
4226     *val = tdata_csr_read(env, csrno - CSR_TDATA1);
4227     return RISCV_EXCP_NONE;
4228 }
4229 
write_tdata(CPURISCVState * env,int csrno,target_ulong val)4230 static RISCVException write_tdata(CPURISCVState *env, int csrno,
4231                                   target_ulong val)
4232 {
4233     if (!tdata_available(env, csrno - CSR_TDATA1)) {
4234         return RISCV_EXCP_ILLEGAL_INST;
4235     }
4236 
4237     tdata_csr_write(env, csrno - CSR_TDATA1, val);
4238     return RISCV_EXCP_NONE;
4239 }
4240 
read_tinfo(CPURISCVState * env,int csrno,target_ulong * val)4241 static RISCVException read_tinfo(CPURISCVState *env, int csrno,
4242                                  target_ulong *val)
4243 {
4244     *val = tinfo_csr_read(env);
4245     return RISCV_EXCP_NONE;
4246 }
4247 
read_mcontext(CPURISCVState * env,int csrno,target_ulong * val)4248 static RISCVException read_mcontext(CPURISCVState *env, int csrno,
4249                                     target_ulong *val)
4250 {
4251     *val = env->mcontext;
4252     return RISCV_EXCP_NONE;
4253 }
4254 
write_mcontext(CPURISCVState * env,int csrno,target_ulong val)4255 static RISCVException write_mcontext(CPURISCVState *env, int csrno,
4256                                      target_ulong val)
4257 {
4258     bool rv32 = riscv_cpu_mxl(env) == MXL_RV32;
4259     int32_t mask;
4260 
4261     if (riscv_has_ext(env, RVH)) {
4262         /* Spec suggests 7 bits for RV32, 14 bits for RV64 w/ H extension */
4263         mask = rv32 ? MCONTEXT32_HCONTEXT : MCONTEXT64_HCONTEXT;
4264     } else {
4265         /* Spec suggests 6 bits for RV32, 13 bits for RV64 w/o H extension */
4266         mask = rv32 ? MCONTEXT32 : MCONTEXT64;
4267     }
4268 
4269     env->mcontext = val & mask;
4270     return RISCV_EXCP_NONE;
4271 }
4272 
4273 /*
4274  * Functions to access Pointer Masking feature registers
4275  * Functions to access the Pointer Masking feature registers.
4276  * We have to check whether the current privilege level is allowed to
4277  * modify a CSR of the given mode.
check_pm_current_disabled(CPURISCVState * env,int csrno)4278 static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
4279 {
4280     int csr_priv = get_field(csrno, 0x300);
4281     int pm_current;
4282 
4283     if (env->debugger) {
4284         return false;
4285     }
4286     /*
4287      * If priv lvls differ that means we're accessing csr from higher priv lvl,
4288      * so allow the access
4289      */
4290     if (env->priv != csr_priv) {
4291         return false;
4292     }
4293     switch (env->priv) {
4294     case PRV_M:
4295         pm_current = get_field(env->mmte, M_PM_CURRENT);
4296         break;
4297     case PRV_S:
4298         pm_current = get_field(env->mmte, S_PM_CURRENT);
4299         break;
4300     case PRV_U:
4301         pm_current = get_field(env->mmte, U_PM_CURRENT);
4302         break;
4303     default:
4304         g_assert_not_reached();
4305     }
4306     /* Same priv lvl, so we only allow modifying the csr if pm.current == 1 */
4307     return !pm_current;
4308 }
4309 
read_mmte(CPURISCVState * env,int csrno,target_ulong * val)4310 static RISCVException read_mmte(CPURISCVState *env, int csrno,
4311                                 target_ulong *val)
4312 {
4313     *val = env->mmte & MMTE_MASK;
4314     return RISCV_EXCP_NONE;
4315 }
4316 
write_mmte(CPURISCVState * env,int csrno,target_ulong val)4317 static RISCVException write_mmte(CPURISCVState *env, int csrno,
4318                                  target_ulong val)
4319 {
4320     uint64_t mstatus;
4321     target_ulong wpri_val = val & MMTE_MASK;
4322 
4323     if (val != wpri_val) {
4324         qemu_log_mask(LOG_GUEST_ERROR,
4325                       "MMTE: WPRI violation written 0x" TARGET_FMT_lx
4326                       " vs expected 0x" TARGET_FMT_lx "\n", val, wpri_val);
4327     }
4328     /* for machine mode pm.current is hardwired to 1 */
4329     wpri_val |= MMTE_M_PM_CURRENT;
4330 
4331     /* hardwiring pm.instruction bit to 0, since it's not supported yet */
4332     wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
4333     env->mmte = wpri_val | EXT_STATUS_DIRTY;
4334     riscv_cpu_update_mask(env);
4335 
4336     /* Set XS and SD bits, since PM CSRs are dirty */
4337     mstatus = env->mstatus | MSTATUS_XS;
4338     write_mstatus(env, csrno, mstatus);
4339     return RISCV_EXCP_NONE;
4340 }
4341 
read_smte(CPURISCVState * env,int csrno,target_ulong * val)4342 static RISCVException read_smte(CPURISCVState *env, int csrno,
4343                                 target_ulong *val)
4344 {
4345     *val = env->mmte & SMTE_MASK;
4346     return RISCV_EXCP_NONE;
4347 }
4348 
write_smte(CPURISCVState * env,int csrno,target_ulong val)4349 static RISCVException write_smte(CPURISCVState *env, int csrno,
4350                                  target_ulong val)
4351 {
4352     target_ulong wpri_val = val & SMTE_MASK;
4353 
4354     if (val != wpri_val) {
4355         qemu_log_mask(LOG_GUEST_ERROR,
4356                       "SMTE: WPRI violation written 0x" TARGET_FMT_lx
4357                       " vs expected 0x" TARGET_FMT_lx "\n", val, wpri_val);
4358     }
4359 
4360     /* if pm.current==0 we can't modify current PM CSRs */
4361     if (check_pm_current_disabled(env, csrno)) {
4362         return RISCV_EXCP_NONE;
4363     }
4364 
4365     wpri_val |= (env->mmte & ~SMTE_MASK);
4366     write_mmte(env, csrno, wpri_val);
4367     return RISCV_EXCP_NONE;
4368 }
4369 
read_umte(CPURISCVState * env,int csrno,target_ulong * val)4370 static RISCVException read_umte(CPURISCVState *env, int csrno,
4371                                 target_ulong *val)
4372 {
4373     *val = env->mmte & UMTE_MASK;
4374     return RISCV_EXCP_NONE;
4375 }
4376 
write_umte(CPURISCVState * env,int csrno,target_ulong val)4377 static RISCVException write_umte(CPURISCVState *env, int csrno,
4378                                  target_ulong val)
4379 {
4380     target_ulong wpri_val = val & UMTE_MASK;
4381 
4382     if (val != wpri_val) {
4383         qemu_log_mask(LOG_GUEST_ERROR,
4384                       "UMTE: WPRI violation written 0x" TARGET_FMT_lx
4385                       " vs expected 0x" TARGET_FMT_lx "\n", val, wpri_val);
4386     }
4387 
4388     if (check_pm_current_disabled(env, csrno)) {
4389         return RISCV_EXCP_NONE;
4390     }
4391 
4392     wpri_val |= (env->mmte & ~UMTE_MASK);
4393     write_mmte(env, csrno, wpri_val);
4394     return RISCV_EXCP_NONE;
4395 }
4396 
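/*
 * The {m,s,u}pmmask and {m,s,u}pmbase writers below share one pattern:
 * update the per-mode register, refresh cur_pmmask/cur_pmbase when that mode
 * is the current address mode with pointer masking enabled, then mark the
 * state dirty via mmte and mstatus.XS/SD.  S- and U-mode writes are dropped
 * when pm.current is 0 (see check_pm_current_disabled()).
 */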
read_mpmmask(CPURISCVState * env,int csrno,target_ulong * val)4397 static RISCVException read_mpmmask(CPURISCVState *env, int csrno,
4398                                    target_ulong *val)
4399 {
4400     *val = env->mpmmask;
4401     return RISCV_EXCP_NONE;
4402 }
4403 
write_mpmmask(CPURISCVState * env,int csrno,target_ulong val)4404 static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
4405                                     target_ulong val)
4406 {
4407     uint64_t mstatus;
4408 
4409     env->mpmmask = val;
4410     if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
4411         env->cur_pmmask = val;
4412     }
4413     env->mmte |= EXT_STATUS_DIRTY;
4414 
4415     /* Set XS and SD bits, since PM CSRs are dirty */
4416     mstatus = env->mstatus | MSTATUS_XS;
4417     write_mstatus(env, csrno, mstatus);
4418     return RISCV_EXCP_NONE;
4419 }
4420 
read_spmmask(CPURISCVState * env,int csrno,target_ulong * val)4421 static RISCVException read_spmmask(CPURISCVState *env, int csrno,
4422                                    target_ulong *val)
4423 {
4424     *val = env->spmmask;
4425     return RISCV_EXCP_NONE;
4426 }
4427 
write_spmmask(CPURISCVState * env,int csrno,target_ulong val)4428 static RISCVException write_spmmask(CPURISCVState *env, int csrno,
4429                                     target_ulong val)
4430 {
4431     uint64_t mstatus;
4432 
4433     /* if pm.current==0 we can't modify current PM CSRs */
4434     if (check_pm_current_disabled(env, csrno)) {
4435         return RISCV_EXCP_NONE;
4436     }
4437     env->spmmask = val;
4438     if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
4439         env->cur_pmmask = val;
4440         if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
4441             env->cur_pmmask &= UINT32_MAX;
4442         }
4443     }
4444     env->mmte |= EXT_STATUS_DIRTY;
4445 
4446     /* Set XS and SD bits, since PM CSRs are dirty */
4447     mstatus = env->mstatus | MSTATUS_XS;
4448     write_mstatus(env, csrno, mstatus);
4449     return RISCV_EXCP_NONE;
4450 }
4451 
read_upmmask(CPURISCVState * env,int csrno,target_ulong * val)4452 static RISCVException read_upmmask(CPURISCVState *env, int csrno,
4453                                    target_ulong *val)
4454 {
4455     *val = env->upmmask;
4456     return RISCV_EXCP_NONE;
4457 }
4458 
write_upmmask(CPURISCVState * env,int csrno,target_ulong val)4459 static RISCVException write_upmmask(CPURISCVState *env, int csrno,
4460                                     target_ulong val)
4461 {
4462     uint64_t mstatus;
4463 
4464     /* if pm.current==0 we can't modify current PM CSRs */
4465     if (check_pm_current_disabled(env, csrno)) {
4466         return RISCV_EXCP_NONE;
4467     }
4468     env->upmmask = val;
4469     if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
4470         env->cur_pmmask = val;
4471         if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
4472             env->cur_pmmask &= UINT32_MAX;
4473         }
4474     }
4475     env->mmte |= EXT_STATUS_DIRTY;
4476 
4477     /* Set XS and SD bits, since PM CSRs are dirty */
4478     mstatus = env->mstatus | MSTATUS_XS;
4479     write_mstatus(env, csrno, mstatus);
4480     return RISCV_EXCP_NONE;
4481 }
4482 
read_mpmbase(CPURISCVState * env,int csrno,target_ulong * val)4483 static RISCVException read_mpmbase(CPURISCVState *env, int csrno,
4484                                    target_ulong *val)
4485 {
4486     *val = env->mpmbase;
4487     return RISCV_EXCP_NONE;
4488 }
4489 
write_mpmbase(CPURISCVState * env,int csrno,target_ulong val)4490 static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
4491                                     target_ulong val)
4492 {
4493     uint64_t mstatus;
4494 
4495     env->mpmbase = val;
4496     if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
4497         env->cur_pmbase = val;
4498     }
4499     env->mmte |= EXT_STATUS_DIRTY;
4500 
4501     /* Set XS and SD bits, since PM CSRs are dirty */
4502     mstatus = env->mstatus | MSTATUS_XS;
4503     write_mstatus(env, csrno, mstatus);
4504     return RISCV_EXCP_NONE;
4505 }
4506 
4507 static RISCVException read_spmbase(CPURISCVState *env, int csrno,
4508                                    target_ulong *val)
4509 {
4510     *val = env->spmbase;
4511     return RISCV_EXCP_NONE;
4512 }
4513 
4514 static RISCVException write_spmbase(CPURISCVState *env, int csrno,
4515                                     target_ulong val)
4516 {
4517     uint64_t mstatus;
4518 
4519     /* if pm.current==0 we can't modify current PM CSRs */
4520     if (check_pm_current_disabled(env, csrno)) {
4521         return RISCV_EXCP_NONE;
4522     }
4523     env->spmbase = val;
4524     if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
4525         env->cur_pmbase = val;
4526         if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
4527             env->cur_pmbase &= UINT32_MAX;
4528         }
4529     }
4530     env->mmte |= EXT_STATUS_DIRTY;
4531 
4532     /* Set XS and SD bits, since PM CSRs are dirty */
4533     mstatus = env->mstatus | MSTATUS_XS;
4534     write_mstatus(env, csrno, mstatus);
4535     return RISCV_EXCP_NONE;
4536 }
4537 
4538 static RISCVException read_upmbase(CPURISCVState *env, int csrno,
4539                                    target_ulong *val)
4540 {
4541     *val = env->upmbase;
4542     return RISCV_EXCP_NONE;
4543 }
4544 
4545 static RISCVException write_upmbase(CPURISCVState *env, int csrno,
4546                                     target_ulong val)
4547 {
4548     uint64_t mstatus;
4549 
4550     /* if pm.current==0 we can't modify current PM CSRs */
4551     if (check_pm_current_disabled(env, csrno)) {
4552         return RISCV_EXCP_NONE;
4553     }
4554     env->upmbase = val;
4555     if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
4556         env->cur_pmbase = val;
4557         if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
4558             env->cur_pmbase &= UINT32_MAX;
4559         }
4560     }
4561     env->mmte |= EXT_STATUS_DIRTY;
4562 
4563     /* Set XS and SD bits, since PM CSRs are dirty */
4564     mstatus = env->mstatus | MSTATUS_XS;
4565     write_mstatus(env, csrno, mstatus);
4566     return RISCV_EXCP_NONE;
4567 }
4568 
4569 #endif
4570 
4571 /* Crypto Extension */
4572 target_ulong riscv_new_csr_seed(target_ulong new_value,
4573                                 target_ulong write_mask)
4574 {
4575     uint16_t random_v;
4576     Error *random_e = NULL;
4577     int random_r;
4578     target_ulong rval;
4579 
4580     random_r = qemu_guest_getrandom(&random_v, 2, &random_e);
4581     if (unlikely(random_r < 0)) {
4582         /*
4583          * Failed, for unknown reasons in the crypto subsystem.
4584          * The best we can do is log the reason and return a
4585          * failure indication to the guest.  There is no reason
4586          * we know to expect the failure to be transitory, so
4587          * indicate DEAD to avoid having the guest spin on WAIT.
4588          */
4589         qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
4590                       __func__, error_get_pretty(random_e));
4591         error_free(random_e);
4592         rval = SEED_OPST_DEAD;
4593     } else {
4594         rval = random_v | SEED_OPST_ES16;
4595     }
4596 
4597     return rval;
4598 }
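
/*
 * Editor's note (assumption, based on the Zkr entropy-source spec rather than
 * on anything else in this file): the value built above packs the OPST status
 * into seed[31:30] and the entropy bits into seed[15:0], e.g.
 *
 *     rval = SEED_OPST_ES16 | random_v;   // OPST = ES16, 16 valid entropy bits
 *     rval = SEED_OPST_DEAD;              // no entropy, guest should not retry
 *
 * so a guest polling the seed CSR retries on BIST/WAIT and gives up on DEAD.
 */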
4599 
4600 static RISCVException rmw_seed(CPURISCVState *env, int csrno,
4601                                target_ulong *ret_value,
4602                                target_ulong new_value,
4603                                target_ulong write_mask)
4604 {
4605     target_ulong rval;
4606 
4607     rval = riscv_new_csr_seed(new_value, write_mask);
4608 
4609     if (ret_value) {
4610         *ret_value = rval;
4611     }
4612 
4613     return RISCV_EXCP_NONE;
4614 }
4615 
4616 /*
4617  * riscv_csrrw - read and/or update control and status register
4618  *
4619  * csrr   <->  riscv_csrrw(env, csrno, ret_value, 0, 0);
4620  * csrrw  <->  riscv_csrrw(env, csrno, ret_value, value, -1);
4621  * csrrs  <->  riscv_csrrw(env, csrno, ret_value, -1, value);
4622  * csrrc  <->  riscv_csrrw(env, csrno, ret_value, 0, value);
4623  */
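
/*
 * Editor's illustration (not part of the original source): these mappings
 * follow from the read-modify-write merge in riscv_csrrw_do64(),
 * new = (old & ~write_mask) | (new_value & write_mask).  For example, a
 * "csrrs t0, mstatus, t1" would be dispatched roughly as
 *
 *     riscv_csrrw(env, CSR_MSTATUS, &old, (target_ulong)-1, set_bits);
 *
 * where "old" and "set_bits" are placeholder names: new_value = all-ones
 * restricted by write_mask = rs1 sets exactly the requested bits, while
 * csrrc passes new_value = 0 to clear them.
 */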
4624 
4625 static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
4626                                                int csrno,
4627                                                bool write)
4628 {
4629     /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
4630     bool read_only = get_field(csrno, 0xC00) == 3;
4631     int csr_min_priv = csr_ops[csrno].min_priv_ver;
4632 
4633     /* ensure the CSR extension is enabled */
4634     if (!riscv_cpu_cfg(env)->ext_zicsr) {
4635         return RISCV_EXCP_ILLEGAL_INST;
4636     }
4637 
4638     /* ensure CSR is implemented by checking predicate */
4639     if (!csr_ops[csrno].predicate) {
4640         return RISCV_EXCP_ILLEGAL_INST;
4641     }
4642 
4643     /* privileged spec version check */
4644     if (env->priv_ver < csr_min_priv) {
4645         return RISCV_EXCP_ILLEGAL_INST;
4646     }
4647 
4648     /* read / write check */
4649     if (write && read_only) {
4650         return RISCV_EXCP_ILLEGAL_INST;
4651     }
4652 
4653     /*
4654      * The predicate() not only checks that the CSR exists but also performs
4655      * access control checks that can, in some cases, raise a virtual
4656      * instruction exception. When a read-only CSR is written in such a case,
4657      * an illegal instruction exception should be raised instead of a virtual
4658      * instruction exception; hence this comes after the read / write check.
4659      */
4660     RISCVException ret = csr_ops[csrno].predicate(env, csrno);
4661     if (ret != RISCV_EXCP_NONE) {
4662         return ret;
4663     }
4664 
4665 #if !defined(CONFIG_USER_ONLY)
4666     int csr_priv, effective_priv = env->priv;
4667 
4668     if (riscv_has_ext(env, RVH) && env->priv == PRV_S &&
4669         !env->virt_enabled) {
4670         /*
4671          * We are in HS mode. Add 1 to the effective privilege level to
4672          * allow us to access the Hypervisor CSRs.
4673          */
4674         effective_priv++;
4675     }
4676 
4677     csr_priv = get_field(csrno, 0x300);
4678     if (!env->debugger && (effective_priv < csr_priv)) {
4679         if (csr_priv == (PRV_S + 1) && env->virt_enabled) {
4680             return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
4681         }
4682         return RISCV_EXCP_ILLEGAL_INST;
4683     }
4684 #endif
4685     return RISCV_EXCP_NONE;
4686 }
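
/*
 * Editor's note (general RISC-V CSR addressing, not specific to this file):
 * the magic masks above decode standard fields of the 12-bit CSR number.
 * get_field(csrno, 0xC00) extracts bits [11:10], where 0b11 marks a read-only
 * CSR, and get_field(csrno, 0x300) extracts bits [9:8], the lowest privilege
 * level allowed to access it.  Adding 1 to the effective privilege in HS-mode
 * lets the PRV_S + 1 comparison accept the hypervisor CSRs, whose encoded
 * privilege field sits between S and M.
 */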
4687 
4688 static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
4689                                        target_ulong *ret_value,
4690                                        target_ulong new_value,
4691                                        target_ulong write_mask)
4692 {
4693     RISCVException ret;
4694     target_ulong old_value = 0;
4695 
4696     /* execute combined read/write operation if it exists */
4697     if (csr_ops[csrno].op) {
4698         return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
4699     }
4700 
4701     /*
4702      * ret_value == NULL means that rd=x0 and we're coming from helper_csrw(),
4703      * so we must not trigger the side effects a CSR read would cause.
4704      */
4705     if (ret_value) {
4706         /* if no accessor exists then return failure */
4707         if (!csr_ops[csrno].read) {
4708             return RISCV_EXCP_ILLEGAL_INST;
4709         }
4710         /* read old value */
4711         ret = csr_ops[csrno].read(env, csrno, &old_value);
4712         if (ret != RISCV_EXCP_NONE) {
4713             return ret;
4714         }
4715     }
4716 
4717     /* write value if writable and write mask set, otherwise drop writes */
4718     if (write_mask) {
4719         new_value = (old_value & ~write_mask) | (new_value & write_mask);
4720         if (csr_ops[csrno].write) {
4721             ret = csr_ops[csrno].write(env, csrno, new_value);
4722             if (ret != RISCV_EXCP_NONE) {
4723                 return ret;
4724             }
4725         }
4726     }
4727 
4728     /* return old value */
4729     if (ret_value) {
4730         *ret_value = old_value;
4731     }
4732 
4733     return RISCV_EXCP_NONE;
4734 }
4735 
4736 RISCVException riscv_csrr(CPURISCVState *env, int csrno,
4737                            target_ulong *ret_value)
4738 {
4739     RISCVException ret = riscv_csrrw_check(env, csrno, false);
4740     if (ret != RISCV_EXCP_NONE) {
4741         return ret;
4742     }
4743 
4744     return riscv_csrrw_do64(env, csrno, ret_value, 0, 0);
4745 }
4746 
4747 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
4748                            target_ulong *ret_value,
4749                            target_ulong new_value, target_ulong write_mask)
4750 {
4751     RISCVException ret = riscv_csrrw_check(env, csrno, true);
4752     if (ret != RISCV_EXCP_NONE) {
4753         return ret;
4754     }
4755 
4756     return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
4757 }
4758 
4759 static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
4760                                         Int128 *ret_value,
4761                                         Int128 new_value,
4762                                         Int128 write_mask)
4763 {
4764     RISCVException ret;
4765     Int128 old_value;
4766 
4767     /* read old value */
4768     ret = csr_ops[csrno].read128(env, csrno, &old_value);
4769     if (ret != RISCV_EXCP_NONE) {
4770         return ret;
4771     }
4772 
4773     /* write value if writable and write mask set, otherwise drop writes */
4774     if (int128_nz(write_mask)) {
4775         new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
4776                               int128_and(new_value, write_mask));
4777         if (csr_ops[csrno].write128) {
4778             ret = csr_ops[csrno].write128(env, csrno, new_value);
4779             if (ret != RISCV_EXCP_NONE) {
4780                 return ret;
4781             }
4782         } else if (csr_ops[csrno].write) {
4783             /* avoids having to write wrappers for all registers */
4784             ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
4785             if (ret != RISCV_EXCP_NONE) {
4786                 return ret;
4787             }
4788         }
4789     }
4790 
4791     /* return old value */
4792     if (ret_value) {
4793         *ret_value = old_value;
4794     }
4795 
4796     return RISCV_EXCP_NONE;
4797 }
4798 
4799 RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
4800                                Int128 *ret_value)
4801 {
4802     RISCVException ret;
4803 
4804     ret = riscv_csrrw_check(env, csrno, false);
4805     if (ret != RISCV_EXCP_NONE) {
4806         return ret;
4807     }
4808 
4809     if (csr_ops[csrno].read128) {
4810         return riscv_csrrw_do128(env, csrno, ret_value,
4811                                  int128_zero(), int128_zero());
4812     }
4813 
4814     /*
4815      * Fall back to the 64-bit version for now if no 128-bit alternative is
4816      * defined.
4817      * Note that some CSRs don't need to extend to MXLEN (their upper 64 bits
4818      * are not significant); for those, this fallback handles the accesses
4819      * correctly.
4820      */
4821     target_ulong old_value;
4822     ret = riscv_csrrw_do64(env, csrno, &old_value,
4823                            (target_ulong)0,
4824                            (target_ulong)0);
4825     if (ret == RISCV_EXCP_NONE && ret_value) {
4826         *ret_value = int128_make64(old_value);
4827     }
4828     return ret;
4829 }
4830 
4831 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
4832                                 Int128 *ret_value,
4833                                 Int128 new_value, Int128 write_mask)
4834 {
4835     RISCVException ret;
4836 
4837     ret = riscv_csrrw_check(env, csrno, true);
4838     if (ret != RISCV_EXCP_NONE) {
4839         return ret;
4840     }
4841 
4842     if (csr_ops[csrno].read128) {
4843         return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
4844     }
4845 
4846     /*
4847      * Fall back to the 64-bit version for now if no 128-bit alternative is
4848      * defined.
4849      * Note that some CSRs don't need to extend to MXLEN (their upper 64 bits
4850      * are not significant); for those, this fallback handles the accesses
4851      * correctly.
4852      */
4853     target_ulong old_value;
4854     ret = riscv_csrrw_do64(env, csrno, &old_value,
4855                            int128_getlo(new_value),
4856                            int128_getlo(write_mask));
4857     if (ret == RISCV_EXCP_NONE && ret_value) {
4858         *ret_value = int128_make64(old_value);
4859     }
4860     return ret;
4861 }
4862 
4863 /*
4864  * Debugger support.  If not in user mode, set env->debugger before the
4865  * riscv_csrrw call and clear it after the call.
4866  */
4867 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
4868                                  target_ulong *ret_value,
4869                                  target_ulong new_value,
4870                                  target_ulong write_mask)
4871 {
4872     RISCVException ret;
4873 #if !defined(CONFIG_USER_ONLY)
4874     env->debugger = true;
4875 #endif
4876     if (!write_mask) {
4877         ret = riscv_csrr(env, csrno, ret_value);
4878     } else {
4879         ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
4880     }
4881 #if !defined(CONFIG_USER_ONLY)
4882     env->debugger = false;
4883 #endif
4884     return ret;
4885 }
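
/*
 * Editor's sketch (hypothetical caller, not from this file): a debugger-side
 * read of a CSR that is normally gated by privilege or extension checks would
 * go through this wrapper so the predicates see env->debugger set:
 *
 *     target_ulong v;
 *     if (riscv_csrrw_debug(env, CSR_MSTATUS, &v, 0, 0) == RISCV_EXCP_NONE) {
 *         // v now holds mstatus regardless of the current privilege level
 *     }
 */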
4886 
4887 static RISCVException read_jvt(CPURISCVState *env, int csrno,
4888                                target_ulong *val)
4889 {
4890     *val = env->jvt;
4891     return RISCV_EXCP_NONE;
4892 }
4893 
4894 static RISCVException write_jvt(CPURISCVState *env, int csrno,
4895                                 target_ulong val)
4896 {
4897     env->jvt = val;
4898     return RISCV_EXCP_NONE;
4899 }
4900 
4901 /*
4902  * Control and Status Register function table
4903  * riscv_csr_operations::predicate() must be provided for an implemented CSR
4904  */
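
/*
 * Editor's note (field order inferred from the initializers below): entries
 * are positional riscv_csr_operations initializers,
 *
 *     { name, predicate, read, write, op, read128, write128, .min_priv_ver }
 *
 * so e.g. "seed" installs only a read-modify-write op, while "mscratch" also
 * provides 128-bit accessors.  A NULL predicate means the CSR is treated as
 * unimplemented by riscv_csrrw_check().
 */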
4905 riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
4906     /* User Floating-Point CSRs */
4907     [CSR_FFLAGS]   = { "fflags",   fs,     read_fflags,  write_fflags },
4908     [CSR_FRM]      = { "frm",      fs,     read_frm,     write_frm    },
4909     [CSR_FCSR]     = { "fcsr",     fs,     read_fcsr,    write_fcsr   },
4910     /* Vector CSRs */
4911     [CSR_VSTART]   = { "vstart",   vs,     read_vstart,  write_vstart },
4912     [CSR_VXSAT]    = { "vxsat",    vs,     read_vxsat,   write_vxsat  },
4913     [CSR_VXRM]     = { "vxrm",     vs,     read_vxrm,    write_vxrm   },
4914     [CSR_VCSR]     = { "vcsr",     vs,     read_vcsr,    write_vcsr   },
4915     [CSR_VL]       = { "vl",       vs,     read_vl                    },
4916     [CSR_VTYPE]    = { "vtype",    vs,     read_vtype                 },
4917     [CSR_VLENB]    = { "vlenb",    vs,     read_vlenb                 },
4918     /* User Timers and Counters */
4919     [CSR_CYCLE]    = { "cycle",    ctr,    read_hpmcounter  },
4920     [CSR_INSTRET]  = { "instret",  ctr,    read_hpmcounter  },
4921     [CSR_CYCLEH]   = { "cycleh",   ctr32,  read_hpmcounterh },
4922     [CSR_INSTRETH] = { "instreth", ctr32,  read_hpmcounterh },
4923 
4924     /*
4925      * In privileged mode, the monitor has to emulate the TIME CSRs only if the
4926      * rdtime callback is not provided by the machine/platform emulation.
4927      */
4928     [CSR_TIME]  = { "time",  ctr,   read_time  },
4929     [CSR_TIMEH] = { "timeh", ctr32, read_timeh },
4930 
4931     /* Crypto Extension */
4932     [CSR_SEED] = { "seed", seed, NULL, NULL, rmw_seed },
4933 
4934     /* Zcmt Extension */
4935     [CSR_JVT] = { "jvt", zcmt, read_jvt, write_jvt },
4936 
4937 #if !defined(CONFIG_USER_ONLY)
4938     /* Machine Timers and Counters */
4939     [CSR_MCYCLE]    = { "mcycle",    any,   read_hpmcounter,
4940                         write_mhpmcounter                    },
4941     [CSR_MINSTRET]  = { "minstret",  any,   read_hpmcounter,
4942                         write_mhpmcounter                    },
4943     [CSR_MCYCLEH]   = { "mcycleh",   any32, read_hpmcounterh,
4944                         write_mhpmcounterh                   },
4945     [CSR_MINSTRETH] = { "minstreth", any32, read_hpmcounterh,
4946                         write_mhpmcounterh                   },
4947 
4948     /* Machine Information Registers */
4949     [CSR_MVENDORID] = { "mvendorid", any,   read_mvendorid },
4950     [CSR_MARCHID]   = { "marchid",   any,   read_marchid   },
4951     [CSR_MIMPID]    = { "mimpid",    any,   read_mimpid    },
4952     [CSR_MHARTID]   = { "mhartid",   any,   read_mhartid   },
4953 
4954     [CSR_MCONFIGPTR]  = { "mconfigptr", any,   read_zero,
4955                           .min_priv_ver = PRIV_VERSION_1_12_0 },
4956     /* Machine Trap Setup */
4957     [CSR_MSTATUS]     = { "mstatus",    any,   read_mstatus, write_mstatus,
4958                           NULL,                read_mstatus_i128           },
4959     [CSR_MISA]        = { "misa",       any,   read_misa,    write_misa,
4960                           NULL,                read_misa_i128              },
4961     [CSR_MIDELEG]     = { "mideleg",    any,   NULL, NULL,   rmw_mideleg   },
4962     [CSR_MEDELEG]     = { "medeleg",    any,   read_medeleg, write_medeleg },
4963     [CSR_MIE]         = { "mie",        any,   NULL, NULL,   rmw_mie       },
4964     [CSR_MTVEC]       = { "mtvec",      any,   read_mtvec,   write_mtvec   },
4965     [CSR_MCOUNTEREN]  = { "mcounteren", umode, read_mcounteren,
4966                           write_mcounteren                                 },
4967 
4968     [CSR_MSTATUSH]    = { "mstatush",   any32, read_mstatush,
4969                           write_mstatush                                   },
4970     [CSR_MEDELEGH]    = { "medelegh",   any32, read_zero, write_ignore,
4971                           .min_priv_ver = PRIV_VERSION_1_13_0              },
4972     [CSR_HEDELEGH]    = { "hedelegh",   hmode32, read_hedelegh, write_hedelegh,
4973                           .min_priv_ver = PRIV_VERSION_1_13_0              },
4974 
4975     /* Machine Trap Handling */
4976     [CSR_MSCRATCH] = { "mscratch", any,  read_mscratch, write_mscratch,
4977                        NULL, read_mscratch_i128, write_mscratch_i128   },
4978     [CSR_MEPC]     = { "mepc",     any,  read_mepc,     write_mepc     },
4979     [CSR_MCAUSE]   = { "mcause",   any,  read_mcause,   write_mcause   },
4980     [CSR_MTVAL]    = { "mtval",    any,  read_mtval,    write_mtval    },
4981     [CSR_MIP]      = { "mip",      any,  NULL,    NULL, rmw_mip        },
4982 
4983     /* Machine-Level Window to Indirectly Accessed Registers (AIA) */
4984     [CSR_MISELECT] = { "miselect", aia_any,   NULL, NULL,    rmw_xiselect },
4985     [CSR_MIREG]    = { "mireg",    aia_any,   NULL, NULL,    rmw_xireg },
4986 
4987     /* Machine-Level Interrupts (AIA) */
4988     [CSR_MTOPEI]   = { "mtopei",   aia_any, NULL, NULL, rmw_xtopei },
4989     [CSR_MTOPI]    = { "mtopi",    aia_any, read_mtopi },
4990 
4991     /* Virtual Interrupts for Supervisor Level (AIA) */
4992     [CSR_MVIEN]    = { "mvien",    aia_any, NULL, NULL, rmw_mvien   },
4993     [CSR_MVIP]     = { "mvip",     aia_any, NULL, NULL, rmw_mvip    },
4994 
4995     /* Machine-Level High-Half CSRs (AIA) */
4996     [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh },
4997     [CSR_MIEH]     = { "mieh",     aia_any32, NULL, NULL, rmw_mieh     },
4998     [CSR_MVIENH]   = { "mvienh",   aia_any32, NULL, NULL, rmw_mvienh   },
4999     [CSR_MVIPH]    = { "mviph",    aia_any32, NULL, NULL, rmw_mviph    },
5000     [CSR_MIPH]     = { "miph",     aia_any32, NULL, NULL, rmw_miph     },
5001 
5002     /* Execution environment configuration */
5003     [CSR_MENVCFG]  = { "menvcfg",  umode, read_menvcfg,  write_menvcfg,
5004                        .min_priv_ver = PRIV_VERSION_1_12_0              },
5005     [CSR_MENVCFGH] = { "menvcfgh", umode32, read_menvcfgh, write_menvcfgh,
5006                        .min_priv_ver = PRIV_VERSION_1_12_0              },
5007     [CSR_SENVCFG]  = { "senvcfg",  smode, read_senvcfg,  write_senvcfg,
5008                        .min_priv_ver = PRIV_VERSION_1_12_0              },
5009     [CSR_HENVCFG]  = { "henvcfg",  hmode, read_henvcfg, write_henvcfg,
5010                        .min_priv_ver = PRIV_VERSION_1_12_0              },
5011     [CSR_HENVCFGH] = { "henvcfgh", hmode32, read_henvcfgh, write_henvcfgh,
5012                        .min_priv_ver = PRIV_VERSION_1_12_0              },
5013 
5014     /* Smstateen extension CSRs */
5015     [CSR_MSTATEEN0] = { "mstateen0", mstateen, read_mstateen, write_mstateen0,
5016                         .min_priv_ver = PRIV_VERSION_1_12_0 },
5017     [CSR_MSTATEEN0H] = { "mstateen0h", mstateen, read_mstateenh,
5018                           write_mstateen0h,
5019                          .min_priv_ver = PRIV_VERSION_1_12_0 },
5020     [CSR_MSTATEEN1] = { "mstateen1", mstateen, read_mstateen,
5021                         write_mstateen_1_3,
5022                         .min_priv_ver = PRIV_VERSION_1_12_0 },
5023     [CSR_MSTATEEN1H] = { "mstateen1h", mstateen, read_mstateenh,
5024                          write_mstateenh_1_3,
5025                          .min_priv_ver = PRIV_VERSION_1_12_0 },
5026     [CSR_MSTATEEN2] = { "mstateen2", mstateen, read_mstateen,
5027                         write_mstateen_1_3,
5028                         .min_priv_ver = PRIV_VERSION_1_12_0 },
5029     [CSR_MSTATEEN2H] = { "mstateen2h", mstateen, read_mstateenh,
5030                          write_mstateenh_1_3,
5031                          .min_priv_ver = PRIV_VERSION_1_12_0 },
5032     [CSR_MSTATEEN3] = { "mstateen3", mstateen, read_mstateen,
5033                         write_mstateen_1_3,
5034                         .min_priv_ver = PRIV_VERSION_1_12_0 },
5035     [CSR_MSTATEEN3H] = { "mstateen3h", mstateen, read_mstateenh,
5036                          write_mstateenh_1_3,
5037                          .min_priv_ver = PRIV_VERSION_1_12_0 },
5038     [CSR_HSTATEEN0] = { "hstateen0", hstateen, read_hstateen, write_hstateen0,
5039                         .min_priv_ver = PRIV_VERSION_1_12_0 },
5040     [CSR_HSTATEEN0H] = { "hstateen0h", hstateenh, read_hstateenh,
5041                          write_hstateen0h,
5042                          .min_priv_ver = PRIV_VERSION_1_12_0 },
5043     [CSR_HSTATEEN1] = { "hstateen1", hstateen, read_hstateen,
5044                         write_hstateen_1_3,
5045                         .min_priv_ver = PRIV_VERSION_1_12_0 },
5046     [CSR_HSTATEEN1H] = { "hstateen1h", hstateenh, read_hstateenh,
5047                          write_hstateenh_1_3,
5048                          .min_priv_ver = PRIV_VERSION_1_12_0 },
5049     [CSR_HSTATEEN2] = { "hstateen2", hstateen, read_hstateen,
5050                         write_hstateen_1_3,
5051                         .min_priv_ver = PRIV_VERSION_1_12_0 },
5052     [CSR_HSTATEEN2H] = { "hstateen2h", hstateenh, read_hstateenh,
5053                          write_hstateenh_1_3,
5054                          .min_priv_ver = PRIV_VERSION_1_12_0 },
5055     [CSR_HSTATEEN3] = { "hstateen3", hstateen, read_hstateen,
5056                         write_hstateen_1_3,
5057                         .min_priv_ver = PRIV_VERSION_1_12_0 },
5058     [CSR_HSTATEEN3H] = { "hstateen3h", hstateenh, read_hstateenh,
5059                          write_hstateenh_1_3,
5060                          .min_priv_ver = PRIV_VERSION_1_12_0 },
5061     [CSR_SSTATEEN0] = { "sstateen0", sstateen, read_sstateen, write_sstateen0,
5062                         .min_priv_ver = PRIV_VERSION_1_12_0 },
5063     [CSR_SSTATEEN1] = { "sstateen1", sstateen, read_sstateen,
5064                         write_sstateen_1_3,
5065                         .min_priv_ver = PRIV_VERSION_1_12_0 },
5066     [CSR_SSTATEEN2] = { "sstateen2", sstateen, read_sstateen,
5067                         write_sstateen_1_3,
5068                         .min_priv_ver = PRIV_VERSION_1_12_0 },
5069     [CSR_SSTATEEN3] = { "sstateen3", sstateen, read_sstateen,
5070                         write_sstateen_1_3,
5071                         .min_priv_ver = PRIV_VERSION_1_12_0 },
5072 
5073     /* Supervisor Trap Setup */
5074     [CSR_SSTATUS]    = { "sstatus",    smode, read_sstatus,    write_sstatus,
5075                          NULL,                read_sstatus_i128              },
5076     [CSR_SIE]        = { "sie",        smode, NULL,   NULL,    rmw_sie       },
5077     [CSR_STVEC]      = { "stvec",      smode, read_stvec,      write_stvec   },
5078     [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren,
5079                          write_scounteren                                    },
5080 
5081     /* Supervisor Trap Handling */
5082     [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch,
5083                        NULL, read_sscratch_i128, write_sscratch_i128    },
5084     [CSR_SEPC]     = { "sepc",     smode, read_sepc,     write_sepc     },
5085     [CSR_SCAUSE]   = { "scause",   smode, read_scause,   write_scause   },
5086     [CSR_STVAL]    = { "stval",    smode, read_stval,    write_stval    },
5087     [CSR_SIP]      = { "sip",      smode, NULL,    NULL, rmw_sip        },
5088     [CSR_STIMECMP] = { "stimecmp", sstc, read_stimecmp, write_stimecmp,
5089                        .min_priv_ver = PRIV_VERSION_1_12_0 },
5090     [CSR_STIMECMPH] = { "stimecmph", sstc_32, read_stimecmph, write_stimecmph,
5091                         .min_priv_ver = PRIV_VERSION_1_12_0 },
5092     [CSR_VSTIMECMP] = { "vstimecmp", sstc, read_vstimecmp,
5093                         write_vstimecmp,
5094                         .min_priv_ver = PRIV_VERSION_1_12_0 },
5095     [CSR_VSTIMECMPH] = { "vstimecmph", sstc_32, read_vstimecmph,
5096                          write_vstimecmph,
5097                          .min_priv_ver = PRIV_VERSION_1_12_0 },
5098 
5099     /* Supervisor Protection and Translation */
5100     [CSR_SATP]     = { "satp",     satp, read_satp,     write_satp     },
5101 
5102     /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
5103     [CSR_SISELECT]   = { "siselect",   aia_smode, NULL, NULL, rmw_xiselect },
5104     [CSR_SIREG]      = { "sireg",      aia_smode, NULL, NULL, rmw_xireg },
5105 
5106     /* Supervisor-Level Interrupts (AIA) */
5107     [CSR_STOPEI]     = { "stopei",     aia_smode, NULL, NULL, rmw_xtopei },
5108     [CSR_STOPI]      = { "stopi",      aia_smode, read_stopi },
5109 
5110     /* Supervisor-Level High-Half CSRs (AIA) */
5111     [CSR_SIEH]       = { "sieh",   aia_smode32, NULL, NULL, rmw_sieh },
5112     [CSR_SIPH]       = { "siph",   aia_smode32, NULL, NULL, rmw_siph },
5113 
5114     [CSR_HSTATUS]     = { "hstatus",     hmode,   read_hstatus, write_hstatus,
5115                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5116     [CSR_HEDELEG]     = { "hedeleg",     hmode,   read_hedeleg, write_hedeleg,
5117                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5118     [CSR_HIDELEG]     = { "hideleg",     hmode,   NULL,   NULL, rmw_hideleg,
5119                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5120     [CSR_HVIP]        = { "hvip",        hmode,   NULL,   NULL, rmw_hvip,
5121                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5122     [CSR_HIP]         = { "hip",         hmode,   NULL,   NULL, rmw_hip,
5123                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5124     [CSR_HIE]         = { "hie",         hmode,   NULL,   NULL, rmw_hie,
5125                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5126     [CSR_HCOUNTEREN]  = { "hcounteren",  hmode,   read_hcounteren,
5127                           write_hcounteren,
5128                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5129     [CSR_HGEIE]       = { "hgeie",       hmode,   read_hgeie,   write_hgeie,
5130                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5131     [CSR_HTVAL]       = { "htval",       hmode,   read_htval,   write_htval,
5132                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5133     [CSR_HTINST]      = { "htinst",      hmode,   read_htinst,  write_htinst,
5134                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5135     [CSR_HGEIP]       = { "hgeip",       hmode,   read_hgeip,
5136                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5137     [CSR_HGATP]       = { "hgatp",       hgatp,   read_hgatp,   write_hgatp,
5138                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5139     [CSR_HTIMEDELTA]  = { "htimedelta",  hmode,   read_htimedelta,
5140                           write_htimedelta,
5141                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5142     [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah,
5143                           write_htimedeltah,
5144                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5145 
5146     [CSR_VSSTATUS]    = { "vsstatus",    hmode,   read_vsstatus,
5147                           write_vsstatus,
5148                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5149     [CSR_VSIP]        = { "vsip",        hmode,   NULL,    NULL, rmw_vsip,
5150                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5151     [CSR_VSIE]        = { "vsie",        hmode,   NULL,    NULL, rmw_vsie,
5152                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5153     [CSR_VSTVEC]      = { "vstvec",      hmode,   read_vstvec,   write_vstvec,
5154                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5155     [CSR_VSSCRATCH]   = { "vsscratch",   hmode,   read_vsscratch,
5156                           write_vsscratch,
5157                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5158     [CSR_VSEPC]       = { "vsepc",       hmode,   read_vsepc,    write_vsepc,
5159                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5160     [CSR_VSCAUSE]     = { "vscause",     hmode,   read_vscause,  write_vscause,
5161                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5162     [CSR_VSTVAL]      = { "vstval",      hmode,   read_vstval,   write_vstval,
5163                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5164     [CSR_VSATP]       = { "vsatp",       hmode,   read_vsatp,    write_vsatp,
5165                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5166 
5167     [CSR_MTVAL2]      = { "mtval2",      hmode,   read_mtval2,   write_mtval2,
5168                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5169     [CSR_MTINST]      = { "mtinst",      hmode,   read_mtinst,   write_mtinst,
5170                           .min_priv_ver = PRIV_VERSION_1_12_0                },
5171 
5172     /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
5173     [CSR_HVIEN]       = { "hvien",       aia_hmode, NULL, NULL, rmw_hvien },
5174     [CSR_HVICTL]      = { "hvictl",      aia_hmode, read_hvictl,
5175                           write_hvictl                                      },
5176     [CSR_HVIPRIO1]    = { "hviprio1",    aia_hmode, read_hviprio1,
5177                           write_hviprio1                                    },
5178     [CSR_HVIPRIO2]    = { "hviprio2",    aia_hmode, read_hviprio2,
5179                           write_hviprio2                                    },
5180     /*
5181      * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
5182      */
5183     [CSR_VSISELECT]   = { "vsiselect",   aia_hmode, NULL, NULL,
5184                           rmw_xiselect                                     },
5185     [CSR_VSIREG]      = { "vsireg",      aia_hmode, NULL, NULL, rmw_xireg  },
5186 
5187     /* VS-Level Interrupts (H-extension with AIA) */
5188     [CSR_VSTOPEI]     = { "vstopei",     aia_hmode, NULL, NULL, rmw_xtopei },
5189     [CSR_VSTOPI]      = { "vstopi",      aia_hmode, read_vstopi },
5190 
5191     /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
5192     [CSR_HIDELEGH]    = { "hidelegh",    aia_hmode32, NULL, NULL,
5193                           rmw_hidelegh                                      },
5194     [CSR_HVIENH]      = { "hvienh",      aia_hmode32, NULL, NULL, rmw_hvienh },
5195     [CSR_HVIPH]       = { "hviph",       aia_hmode32, NULL, NULL, rmw_hviph },
5196     [CSR_HVIPRIO1H]   = { "hviprio1h",   aia_hmode32, read_hviprio1h,
5197                           write_hviprio1h                                   },
5198     [CSR_HVIPRIO2H]   = { "hviprio2h",   aia_hmode32, read_hviprio2h,
5199                           write_hviprio2h                                   },
5200     [CSR_VSIEH]       = { "vsieh",       aia_hmode32, NULL, NULL, rmw_vsieh },
5201     [CSR_VSIPH]       = { "vsiph",       aia_hmode32, NULL, NULL, rmw_vsiph },
5202 
5203     /* Physical Memory Protection */
5204     [CSR_MSECCFG]    = { "mseccfg",   have_mseccfg, read_mseccfg, write_mseccfg,
5205                          .min_priv_ver = PRIV_VERSION_1_11_0           },
5206     [CSR_PMPCFG0]    = { "pmpcfg0",   pmp, read_pmpcfg,  write_pmpcfg  },
5207     [CSR_PMPCFG1]    = { "pmpcfg1",   pmp, read_pmpcfg,  write_pmpcfg  },
5208     [CSR_PMPCFG2]    = { "pmpcfg2",   pmp, read_pmpcfg,  write_pmpcfg  },
5209     [CSR_PMPCFG3]    = { "pmpcfg3",   pmp, read_pmpcfg,  write_pmpcfg  },
5210     [CSR_PMPADDR0]   = { "pmpaddr0",  pmp, read_pmpaddr, write_pmpaddr },
5211     [CSR_PMPADDR1]   = { "pmpaddr1",  pmp, read_pmpaddr, write_pmpaddr },
5212     [CSR_PMPADDR2]   = { "pmpaddr2",  pmp, read_pmpaddr, write_pmpaddr },
5213     [CSR_PMPADDR3]   = { "pmpaddr3",  pmp, read_pmpaddr, write_pmpaddr },
5214     [CSR_PMPADDR4]   = { "pmpaddr4",  pmp, read_pmpaddr, write_pmpaddr },
5215     [CSR_PMPADDR5]   = { "pmpaddr5",  pmp, read_pmpaddr, write_pmpaddr },
5216     [CSR_PMPADDR6]   = { "pmpaddr6",  pmp, read_pmpaddr, write_pmpaddr },
5217     [CSR_PMPADDR7]   = { "pmpaddr7",  pmp, read_pmpaddr, write_pmpaddr },
5218     [CSR_PMPADDR8]   = { "pmpaddr8",  pmp, read_pmpaddr, write_pmpaddr },
5219     [CSR_PMPADDR9]   = { "pmpaddr9",  pmp, read_pmpaddr, write_pmpaddr },
5220     [CSR_PMPADDR10]  = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr },
5221     [CSR_PMPADDR11]  = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
5222     [CSR_PMPADDR12]  = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
5223     [CSR_PMPADDR13]  = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
5224     [CSR_PMPADDR14]  = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
5225     [CSR_PMPADDR15]  = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
5226 
5227     /* Debug CSRs */
5228     [CSR_TSELECT]   =  { "tselect",  debug, read_tselect,  write_tselect  },
5229     [CSR_TDATA1]    =  { "tdata1",   debug, read_tdata,    write_tdata    },
5230     [CSR_TDATA2]    =  { "tdata2",   debug, read_tdata,    write_tdata    },
5231     [CSR_TDATA3]    =  { "tdata3",   debug, read_tdata,    write_tdata    },
5232     [CSR_TINFO]     =  { "tinfo",    debug, read_tinfo,    write_ignore   },
5233     [CSR_MCONTEXT]  =  { "mcontext", debug, read_mcontext, write_mcontext },
5234 
5235     /* User Pointer Masking */
5236     [CSR_UMTE]    =    { "umte",    pointer_masking, read_umte,  write_umte },
5237     [CSR_UPMMASK] =    { "upmmask", pointer_masking, read_upmmask,
5238                          write_upmmask                                      },
5239     [CSR_UPMBASE] =    { "upmbase", pointer_masking, read_upmbase,
5240                          write_upmbase                                      },
5241     /* Machine Pointer Masking */
5242     [CSR_MMTE]    =    { "mmte",    pointer_masking, read_mmte,  write_mmte },
5243     [CSR_MPMMASK] =    { "mpmmask", pointer_masking, read_mpmmask,
5244                          write_mpmmask                                      },
5245     [CSR_MPMBASE] =    { "mpmbase", pointer_masking, read_mpmbase,
5246                          write_mpmbase                                      },
5247     /* Supervisor Pointer Masking */
5248     [CSR_SMTE]    =    { "smte",    pointer_masking, read_smte,  write_smte },
5249     [CSR_SPMMASK] =    { "spmmask", pointer_masking, read_spmmask,
5250                          write_spmmask                                      },
5251     [CSR_SPMBASE] =    { "spmbase", pointer_masking, read_spmbase,
5252                          write_spmbase                                      },
5253 
5254     /* Performance Counters */
5255     [CSR_HPMCOUNTER3]    = { "hpmcounter3",    ctr,    read_hpmcounter },
5256     [CSR_HPMCOUNTER4]    = { "hpmcounter4",    ctr,    read_hpmcounter },
5257     [CSR_HPMCOUNTER5]    = { "hpmcounter5",    ctr,    read_hpmcounter },
5258     [CSR_HPMCOUNTER6]    = { "hpmcounter6",    ctr,    read_hpmcounter },
5259     [CSR_HPMCOUNTER7]    = { "hpmcounter7",    ctr,    read_hpmcounter },
5260     [CSR_HPMCOUNTER8]    = { "hpmcounter8",    ctr,    read_hpmcounter },
5261     [CSR_HPMCOUNTER9]    = { "hpmcounter9",    ctr,    read_hpmcounter },
5262     [CSR_HPMCOUNTER10]   = { "hpmcounter10",   ctr,    read_hpmcounter },
5263     [CSR_HPMCOUNTER11]   = { "hpmcounter11",   ctr,    read_hpmcounter },
5264     [CSR_HPMCOUNTER12]   = { "hpmcounter12",   ctr,    read_hpmcounter },
5265     [CSR_HPMCOUNTER13]   = { "hpmcounter13",   ctr,    read_hpmcounter },
5266     [CSR_HPMCOUNTER14]   = { "hpmcounter14",   ctr,    read_hpmcounter },
5267     [CSR_HPMCOUNTER15]   = { "hpmcounter15",   ctr,    read_hpmcounter },
5268     [CSR_HPMCOUNTER16]   = { "hpmcounter16",   ctr,    read_hpmcounter },
5269     [CSR_HPMCOUNTER17]   = { "hpmcounter17",   ctr,    read_hpmcounter },
5270     [CSR_HPMCOUNTER18]   = { "hpmcounter18",   ctr,    read_hpmcounter },
5271     [CSR_HPMCOUNTER19]   = { "hpmcounter19",   ctr,    read_hpmcounter },
5272     [CSR_HPMCOUNTER20]   = { "hpmcounter20",   ctr,    read_hpmcounter },
5273     [CSR_HPMCOUNTER21]   = { "hpmcounter21",   ctr,    read_hpmcounter },
5274     [CSR_HPMCOUNTER22]   = { "hpmcounter22",   ctr,    read_hpmcounter },
5275     [CSR_HPMCOUNTER23]   = { "hpmcounter23",   ctr,    read_hpmcounter },
5276     [CSR_HPMCOUNTER24]   = { "hpmcounter24",   ctr,    read_hpmcounter },
5277     [CSR_HPMCOUNTER25]   = { "hpmcounter25",   ctr,    read_hpmcounter },
5278     [CSR_HPMCOUNTER26]   = { "hpmcounter26",   ctr,    read_hpmcounter },
5279     [CSR_HPMCOUNTER27]   = { "hpmcounter27",   ctr,    read_hpmcounter },
5280     [CSR_HPMCOUNTER28]   = { "hpmcounter28",   ctr,    read_hpmcounter },
5281     [CSR_HPMCOUNTER29]   = { "hpmcounter29",   ctr,    read_hpmcounter },
5282     [CSR_HPMCOUNTER30]   = { "hpmcounter30",   ctr,    read_hpmcounter },
5283     [CSR_HPMCOUNTER31]   = { "hpmcounter31",   ctr,    read_hpmcounter },
5284 
5285     [CSR_MHPMCOUNTER3]   = { "mhpmcounter3",   mctr,    read_hpmcounter,
5286                              write_mhpmcounter                         },
5287     [CSR_MHPMCOUNTER4]   = { "mhpmcounter4",   mctr,    read_hpmcounter,
5288                              write_mhpmcounter                         },
5289     [CSR_MHPMCOUNTER5]   = { "mhpmcounter5",   mctr,    read_hpmcounter,
5290                              write_mhpmcounter                         },
5291     [CSR_MHPMCOUNTER6]   = { "mhpmcounter6",   mctr,    read_hpmcounter,
5292                              write_mhpmcounter                         },
5293     [CSR_MHPMCOUNTER7]   = { "mhpmcounter7",   mctr,    read_hpmcounter,
5294                              write_mhpmcounter                         },
5295     [CSR_MHPMCOUNTER8]   = { "mhpmcounter8",   mctr,    read_hpmcounter,
5296                              write_mhpmcounter                         },
5297     [CSR_MHPMCOUNTER9]   = { "mhpmcounter9",   mctr,    read_hpmcounter,
5298                              write_mhpmcounter                         },
5299     [CSR_MHPMCOUNTER10]  = { "mhpmcounter10",  mctr,    read_hpmcounter,
5300                              write_mhpmcounter                         },
5301     [CSR_MHPMCOUNTER11]  = { "mhpmcounter11",  mctr,    read_hpmcounter,
5302                              write_mhpmcounter                         },
5303     [CSR_MHPMCOUNTER12]  = { "mhpmcounter12",  mctr,    read_hpmcounter,
5304                              write_mhpmcounter                         },
5305     [CSR_MHPMCOUNTER13]  = { "mhpmcounter13",  mctr,    read_hpmcounter,
5306                              write_mhpmcounter                         },
5307     [CSR_MHPMCOUNTER14]  = { "mhpmcounter14",  mctr,    read_hpmcounter,
5308                              write_mhpmcounter                         },
5309     [CSR_MHPMCOUNTER15]  = { "mhpmcounter15",  mctr,    read_hpmcounter,
5310                              write_mhpmcounter                         },
5311     [CSR_MHPMCOUNTER16]  = { "mhpmcounter16",  mctr,    read_hpmcounter,
5312                              write_mhpmcounter                         },
5313     [CSR_MHPMCOUNTER17]  = { "mhpmcounter17",  mctr,    read_hpmcounter,
5314                              write_mhpmcounter                         },
5315     [CSR_MHPMCOUNTER18]  = { "mhpmcounter18",  mctr,    read_hpmcounter,
5316                              write_mhpmcounter                         },
5317     [CSR_MHPMCOUNTER19]  = { "mhpmcounter19",  mctr,    read_hpmcounter,
5318                              write_mhpmcounter                         },
5319     [CSR_MHPMCOUNTER20]  = { "mhpmcounter20",  mctr,    read_hpmcounter,
5320                              write_mhpmcounter                         },
5321     [CSR_MHPMCOUNTER21]  = { "mhpmcounter21",  mctr,    read_hpmcounter,
5322                              write_mhpmcounter                         },
5323     [CSR_MHPMCOUNTER22]  = { "mhpmcounter22",  mctr,    read_hpmcounter,
5324                              write_mhpmcounter                         },
5325     [CSR_MHPMCOUNTER23]  = { "mhpmcounter23",  mctr,    read_hpmcounter,
5326                              write_mhpmcounter                         },
5327     [CSR_MHPMCOUNTER24]  = { "mhpmcounter24",  mctr,    read_hpmcounter,
5328                              write_mhpmcounter                         },
5329     [CSR_MHPMCOUNTER25]  = { "mhpmcounter25",  mctr,    read_hpmcounter,
5330                              write_mhpmcounter                         },
5331     [CSR_MHPMCOUNTER26]  = { "mhpmcounter26",  mctr,    read_hpmcounter,
5332                              write_mhpmcounter                         },
5333     [CSR_MHPMCOUNTER27]  = { "mhpmcounter27",  mctr,    read_hpmcounter,
5334                              write_mhpmcounter                         },
5335     [CSR_MHPMCOUNTER28]  = { "mhpmcounter28",  mctr,    read_hpmcounter,
5336                              write_mhpmcounter                         },
5337     [CSR_MHPMCOUNTER29]  = { "mhpmcounter29",  mctr,    read_hpmcounter,
5338                              write_mhpmcounter                         },
5339     [CSR_MHPMCOUNTER30]  = { "mhpmcounter30",  mctr,    read_hpmcounter,
5340                              write_mhpmcounter                         },
5341     [CSR_MHPMCOUNTER31]  = { "mhpmcounter31",  mctr,    read_hpmcounter,
5342                              write_mhpmcounter                         },
5343 
5344     [CSR_MCOUNTINHIBIT]  = { "mcountinhibit",  any, read_mcountinhibit,
5345                              write_mcountinhibit,
5346                              .min_priv_ver = PRIV_VERSION_1_11_0       },
5347 
5348     [CSR_MCYCLECFG]      = { "mcyclecfg",   smcntrpmf, read_mcyclecfg,
5349                              write_mcyclecfg,
5350                              .min_priv_ver = PRIV_VERSION_1_12_0       },
5351     [CSR_MINSTRETCFG]    = { "minstretcfg", smcntrpmf, read_minstretcfg,
5352                              write_minstretcfg,
5353                              .min_priv_ver = PRIV_VERSION_1_12_0       },
5354 
5355     [CSR_MHPMEVENT3]     = { "mhpmevent3",     any,    read_mhpmevent,
5356                              write_mhpmevent                           },
5357     [CSR_MHPMEVENT4]     = { "mhpmevent4",     any,    read_mhpmevent,
5358                              write_mhpmevent                           },
5359     [CSR_MHPMEVENT5]     = { "mhpmevent5",     any,    read_mhpmevent,
5360                              write_mhpmevent                           },
5361     [CSR_MHPMEVENT6]     = { "mhpmevent6",     any,    read_mhpmevent,
5362                              write_mhpmevent                           },
5363     [CSR_MHPMEVENT7]     = { "mhpmevent7",     any,    read_mhpmevent,
5364                              write_mhpmevent                           },
5365     [CSR_MHPMEVENT8]     = { "mhpmevent8",     any,    read_mhpmevent,
5366                              write_mhpmevent                           },
5367     [CSR_MHPMEVENT9]     = { "mhpmevent9",     any,    read_mhpmevent,
5368                              write_mhpmevent                           },
5369     [CSR_MHPMEVENT10]    = { "mhpmevent10",    any,    read_mhpmevent,
5370                              write_mhpmevent                           },
5371     [CSR_MHPMEVENT11]    = { "mhpmevent11",    any,    read_mhpmevent,
5372                              write_mhpmevent                           },
5373     [CSR_MHPMEVENT12]    = { "mhpmevent12",    any,    read_mhpmevent,
5374                              write_mhpmevent                           },
5375     [CSR_MHPMEVENT13]    = { "mhpmevent13",    any,    read_mhpmevent,
5376                              write_mhpmevent                           },
5377     [CSR_MHPMEVENT14]    = { "mhpmevent14",    any,    read_mhpmevent,
5378                              write_mhpmevent                           },
5379     [CSR_MHPMEVENT15]    = { "mhpmevent15",    any,    read_mhpmevent,
5380                              write_mhpmevent                           },
5381     [CSR_MHPMEVENT16]    = { "mhpmevent16",    any,    read_mhpmevent,
5382                              write_mhpmevent                           },
5383     [CSR_MHPMEVENT17]    = { "mhpmevent17",    any,    read_mhpmevent,
5384                              write_mhpmevent                           },
5385     [CSR_MHPMEVENT18]    = { "mhpmevent18",    any,    read_mhpmevent,
5386                              write_mhpmevent                           },
5387     [CSR_MHPMEVENT19]    = { "mhpmevent19",    any,    read_mhpmevent,
5388                              write_mhpmevent                           },
5389     [CSR_MHPMEVENT20]    = { "mhpmevent20",    any,    read_mhpmevent,
5390                              write_mhpmevent                           },
5391     [CSR_MHPMEVENT21]    = { "mhpmevent21",    any,    read_mhpmevent,
5392                              write_mhpmevent                           },
5393     [CSR_MHPMEVENT22]    = { "mhpmevent22",    any,    read_mhpmevent,
5394                              write_mhpmevent                           },
5395     [CSR_MHPMEVENT23]    = { "mhpmevent23",    any,    read_mhpmevent,
5396                              write_mhpmevent                           },
5397     [CSR_MHPMEVENT24]    = { "mhpmevent24",    any,    read_mhpmevent,
5398                              write_mhpmevent                           },
5399     [CSR_MHPMEVENT25]    = { "mhpmevent25",    any,    read_mhpmevent,
5400                              write_mhpmevent                           },
5401     [CSR_MHPMEVENT26]    = { "mhpmevent26",    any,    read_mhpmevent,
5402                              write_mhpmevent                           },
5403     [CSR_MHPMEVENT27]    = { "mhpmevent27",    any,    read_mhpmevent,
5404                              write_mhpmevent                           },
5405     [CSR_MHPMEVENT28]    = { "mhpmevent28",    any,    read_mhpmevent,
5406                              write_mhpmevent                           },
5407     [CSR_MHPMEVENT29]    = { "mhpmevent29",    any,    read_mhpmevent,
5408                              write_mhpmevent                           },
5409     [CSR_MHPMEVENT30]    = { "mhpmevent30",    any,    read_mhpmevent,
5410                              write_mhpmevent                           },
5411     [CSR_MHPMEVENT31]    = { "mhpmevent31",    any,    read_mhpmevent,
5412                              write_mhpmevent                           },
5413 
5414     [CSR_MCYCLECFGH]     = { "mcyclecfgh",   smcntrpmf_32, read_mcyclecfgh,
5415                              write_mcyclecfgh,
5416                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5417     [CSR_MINSTRETCFGH]   = { "minstretcfgh", smcntrpmf_32, read_minstretcfgh,
5418                              write_minstretcfgh,
5419                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5420 
5421     [CSR_MHPMEVENT3H]    = { "mhpmevent3h",    sscofpmf_32,  read_mhpmeventh,
5422                              write_mhpmeventh,
5423                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5424     [CSR_MHPMEVENT4H]    = { "mhpmevent4h",    sscofpmf_32,  read_mhpmeventh,
5425                              write_mhpmeventh,
5426                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5427     [CSR_MHPMEVENT5H]    = { "mhpmevent5h",    sscofpmf_32,  read_mhpmeventh,
5428                              write_mhpmeventh,
5429                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5430     [CSR_MHPMEVENT6H]    = { "mhpmevent6h",    sscofpmf_32,  read_mhpmeventh,
5431                              write_mhpmeventh,
5432                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5433     [CSR_MHPMEVENT7H]    = { "mhpmevent7h",    sscofpmf_32,  read_mhpmeventh,
5434                              write_mhpmeventh,
5435                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5436     [CSR_MHPMEVENT8H]    = { "mhpmevent8h",    sscofpmf_32,  read_mhpmeventh,
5437                              write_mhpmeventh,
5438                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5439     [CSR_MHPMEVENT9H]    = { "mhpmevent9h",    sscofpmf_32,  read_mhpmeventh,
5440                              write_mhpmeventh,
5441                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5442     [CSR_MHPMEVENT10H]   = { "mhpmevent10h",    sscofpmf_32,  read_mhpmeventh,
5443                              write_mhpmeventh,
5444                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5445     [CSR_MHPMEVENT11H]   = { "mhpmevent11h",    sscofpmf_32,  read_mhpmeventh,
5446                              write_mhpmeventh,
5447                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5448     [CSR_MHPMEVENT12H]   = { "mhpmevent12h",    sscofpmf_32,  read_mhpmeventh,
5449                              write_mhpmeventh,
5450                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5451     [CSR_MHPMEVENT13H]   = { "mhpmevent13h",    sscofpmf_32,  read_mhpmeventh,
5452                              write_mhpmeventh,
5453                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5454     [CSR_MHPMEVENT14H]   = { "mhpmevent14h",    sscofpmf_32,  read_mhpmeventh,
5455                              write_mhpmeventh,
5456                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5457     [CSR_MHPMEVENT15H]   = { "mhpmevent15h",    sscofpmf_32,  read_mhpmeventh,
5458                              write_mhpmeventh,
5459                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5460     [CSR_MHPMEVENT16H]   = { "mhpmevent16h",    sscofpmf_32,  read_mhpmeventh,
5461                              write_mhpmeventh,
5462                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5463     [CSR_MHPMEVENT17H]   = { "mhpmevent17h",    sscofpmf_32,  read_mhpmeventh,
5464                              write_mhpmeventh,
5465                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5466     [CSR_MHPMEVENT18H]   = { "mhpmevent18h",    sscofpmf_32,  read_mhpmeventh,
5467                              write_mhpmeventh,
5468                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5469     [CSR_MHPMEVENT19H]   = { "mhpmevent19h",    sscofpmf_32,  read_mhpmeventh,
5470                              write_mhpmeventh,
5471                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5472     [CSR_MHPMEVENT20H]   = { "mhpmevent20h",    sscofpmf_32,  read_mhpmeventh,
5473                              write_mhpmeventh,
5474                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5475     [CSR_MHPMEVENT21H]   = { "mhpmevent21h",    sscofpmf_32,  read_mhpmeventh,
5476                              write_mhpmeventh,
5477                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5478     [CSR_MHPMEVENT22H]   = { "mhpmevent22h",    sscofpmf_32,  read_mhpmeventh,
5479                              write_mhpmeventh,
5480                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5481     [CSR_MHPMEVENT23H]   = { "mhpmevent23h",    sscofpmf_32,  read_mhpmeventh,
5482                              write_mhpmeventh,
5483                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5484     [CSR_MHPMEVENT24H]   = { "mhpmevent24h",    sscofpmf_32,  read_mhpmeventh,
5485                              write_mhpmeventh,
5486                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5487     [CSR_MHPMEVENT25H]   = { "mhpmevent25h",    sscofpmf_32,  read_mhpmeventh,
5488                              write_mhpmeventh,
5489                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5490     [CSR_MHPMEVENT26H]   = { "mhpmevent26h",    sscofpmf_32,  read_mhpmeventh,
5491                              write_mhpmeventh,
5492                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5493     [CSR_MHPMEVENT27H]   = { "mhpmevent27h",    sscofpmf_32,  read_mhpmeventh,
5494                              write_mhpmeventh,
5495                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5496     [CSR_MHPMEVENT28H]   = { "mhpmevent28h",    sscofpmf_32,  read_mhpmeventh,
5497                              write_mhpmeventh,
5498                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5499     [CSR_MHPMEVENT29H]   = { "mhpmevent29h",    sscofpmf_32,  read_mhpmeventh,
5500                              write_mhpmeventh,
5501                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5502     [CSR_MHPMEVENT30H]   = { "mhpmevent30h",    sscofpmf_32,  read_mhpmeventh,
5503                              write_mhpmeventh,
5504                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5505     [CSR_MHPMEVENT31H]   = { "mhpmevent31h",    sscofpmf_32,  read_mhpmeventh,
5506                              write_mhpmeventh,
5507                              .min_priv_ver = PRIV_VERSION_1_12_0        },
5508 
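    /*
     * Upper 32 bits of the unprivileged hpmcounter CSRs. The ctr32
     * predicate limits these entries to RV32, where the counters are
     * split across a low/high CSR pair; they are read-only here.
     */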
5509     [CSR_HPMCOUNTER3H]   = { "hpmcounter3h",   ctr32,  read_hpmcounterh },
5510     [CSR_HPMCOUNTER4H]   = { "hpmcounter4h",   ctr32,  read_hpmcounterh },
5511     [CSR_HPMCOUNTER5H]   = { "hpmcounter5h",   ctr32,  read_hpmcounterh },
5512     [CSR_HPMCOUNTER6H]   = { "hpmcounter6h",   ctr32,  read_hpmcounterh },
5513     [CSR_HPMCOUNTER7H]   = { "hpmcounter7h",   ctr32,  read_hpmcounterh },
5514     [CSR_HPMCOUNTER8H]   = { "hpmcounter8h",   ctr32,  read_hpmcounterh },
5515     [CSR_HPMCOUNTER9H]   = { "hpmcounter9h",   ctr32,  read_hpmcounterh },
5516     [CSR_HPMCOUNTER10H]  = { "hpmcounter10h",  ctr32,  read_hpmcounterh },
5517     [CSR_HPMCOUNTER11H]  = { "hpmcounter11h",  ctr32,  read_hpmcounterh },
5518     [CSR_HPMCOUNTER12H]  = { "hpmcounter12h",  ctr32,  read_hpmcounterh },
5519     [CSR_HPMCOUNTER13H]  = { "hpmcounter13h",  ctr32,  read_hpmcounterh },
5520     [CSR_HPMCOUNTER14H]  = { "hpmcounter14h",  ctr32,  read_hpmcounterh },
5521     [CSR_HPMCOUNTER15H]  = { "hpmcounter15h",  ctr32,  read_hpmcounterh },
5522     [CSR_HPMCOUNTER16H]  = { "hpmcounter16h",  ctr32,  read_hpmcounterh },
5523     [CSR_HPMCOUNTER17H]  = { "hpmcounter17h",  ctr32,  read_hpmcounterh },
5524     [CSR_HPMCOUNTER18H]  = { "hpmcounter18h",  ctr32,  read_hpmcounterh },
5525     [CSR_HPMCOUNTER19H]  = { "hpmcounter19h",  ctr32,  read_hpmcounterh },
5526     [CSR_HPMCOUNTER20H]  = { "hpmcounter20h",  ctr32,  read_hpmcounterh },
5527     [CSR_HPMCOUNTER21H]  = { "hpmcounter21h",  ctr32,  read_hpmcounterh },
5528     [CSR_HPMCOUNTER22H]  = { "hpmcounter22h",  ctr32,  read_hpmcounterh },
5529     [CSR_HPMCOUNTER23H]  = { "hpmcounter23h",  ctr32,  read_hpmcounterh },
5530     [CSR_HPMCOUNTER24H]  = { "hpmcounter24h",  ctr32,  read_hpmcounterh },
5531     [CSR_HPMCOUNTER25H]  = { "hpmcounter25h",  ctr32,  read_hpmcounterh },
5532     [CSR_HPMCOUNTER26H]  = { "hpmcounter26h",  ctr32,  read_hpmcounterh },
5533     [CSR_HPMCOUNTER27H]  = { "hpmcounter27h",  ctr32,  read_hpmcounterh },
5534     [CSR_HPMCOUNTER28H]  = { "hpmcounter28h",  ctr32,  read_hpmcounterh },
5535     [CSR_HPMCOUNTER29H]  = { "hpmcounter29h",  ctr32,  read_hpmcounterh },
5536     [CSR_HPMCOUNTER30H]  = { "hpmcounter30h",  ctr32,  read_hpmcounterh },
5537     [CSR_HPMCOUNTER31H]  = { "hpmcounter31h",  ctr32,  read_hpmcounterh },
5538 
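    /*
     * Machine-mode upper 32 bits of the programmable counters. The
     * mctr32 predicate again restricts these to RV32; unlike the
     * hpmcounterNh aliases above, M-mode may also write them.
     */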
5539     [CSR_MHPMCOUNTER3H]  = { "mhpmcounter3h",  mctr32,  read_hpmcounterh,
5540                              write_mhpmcounterh                         },
5541     [CSR_MHPMCOUNTER4H]  = { "mhpmcounter4h",  mctr32,  read_hpmcounterh,
5542                              write_mhpmcounterh                         },
5543     [CSR_MHPMCOUNTER5H]  = { "mhpmcounter5h",  mctr32,  read_hpmcounterh,
5544                              write_mhpmcounterh                         },
5545     [CSR_MHPMCOUNTER6H]  = { "mhpmcounter6h",  mctr32,  read_hpmcounterh,
5546                              write_mhpmcounterh                         },
5547     [CSR_MHPMCOUNTER7H]  = { "mhpmcounter7h",  mctr32,  read_hpmcounterh,
5548                              write_mhpmcounterh                         },
5549     [CSR_MHPMCOUNTER8H]  = { "mhpmcounter8h",  mctr32,  read_hpmcounterh,
5550                              write_mhpmcounterh                         },
5551     [CSR_MHPMCOUNTER9H]  = { "mhpmcounter9h",  mctr32,  read_hpmcounterh,
5552                              write_mhpmcounterh                         },
5553     [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", mctr32,  read_hpmcounterh,
5554                              write_mhpmcounterh                         },
5555     [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", mctr32,  read_hpmcounterh,
5556                              write_mhpmcounterh                         },
5557     [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", mctr32,  read_hpmcounterh,
5558                              write_mhpmcounterh                         },
5559     [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", mctr32,  read_hpmcounterh,
5560                              write_mhpmcounterh                         },
5561     [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", mctr32,  read_hpmcounterh,
5562                              write_mhpmcounterh                         },
5563     [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", mctr32,  read_hpmcounterh,
5564                              write_mhpmcounterh                         },
5565     [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", mctr32,  read_hpmcounterh,
5566                              write_mhpmcounterh                         },
5567     [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", mctr32,  read_hpmcounterh,
5568                              write_mhpmcounterh                         },
5569     [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", mctr32,  read_hpmcounterh,
5570                              write_mhpmcounterh                         },
5571     [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", mctr32,  read_hpmcounterh,
5572                              write_mhpmcounterh                         },
5573     [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", mctr32,  read_hpmcounterh,
5574                              write_mhpmcounterh                         },
5575     [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", mctr32,  read_hpmcounterh,
5576                              write_mhpmcounterh                         },
5577     [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", mctr32,  read_hpmcounterh,
5578                              write_mhpmcounterh                         },
5579     [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", mctr32,  read_hpmcounterh,
5580                              write_mhpmcounterh                         },
5581     [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", mctr32,  read_hpmcounterh,
5582                              write_mhpmcounterh                         },
5583     [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", mctr32,  read_hpmcounterh,
5584                              write_mhpmcounterh                         },
5585     [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", mctr32,  read_hpmcounterh,
5586                              write_mhpmcounterh                         },
5587     [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", mctr32,  read_hpmcounterh,
5588                              write_mhpmcounterh                         },
5589     [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", mctr32,  read_hpmcounterh,
5590                              write_mhpmcounterh                         },
5591     [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", mctr32,  read_hpmcounterh,
5592                              write_mhpmcounterh                         },
5593     [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", mctr32,  read_hpmcounterh,
5594                              write_mhpmcounterh                         },
5595     [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", mctr32,  read_hpmcounterh,
5596                              write_mhpmcounterh                         },
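    /* Sscofpmf count-overflow status CSR, read-only from S-mode. */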
5597     [CSR_SCOUNTOVF]      = { "scountovf", sscofpmf,  read_scountovf,
5598                              .min_priv_ver = PRIV_VERSION_1_12_0 },
5599 
5600 #endif /* !CONFIG_USER_ONLY */
5601 };
5602