1 /*
2 * RISC-V Control and Status Registers.
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/timer.h"
23 #include "cpu.h"
24 #include "tcg/tcg-cpu.h"
25 #include "pmu.h"
26 #include "time_helper.h"
27 #include "exec/exec-all.h"
28 #include "exec/tb-flush.h"
29 #include "sysemu/cpu-timers.h"
30 #include "qemu/guest-random.h"
31 #include "qapi/error.h"
32
33 /* CSR function table public API */
/* Copy the csr_ops table entry for @csrno into the caller's @ops. */
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
{
    int index = csrno & (CSR_TABLE_SIZE - 1);

    *ops = csr_ops[index];
}
38
/* Install @ops as the csr_ops table entry for @csrno. */
void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
{
    int index = csrno & (CSR_TABLE_SIZE - 1);

    csr_ops[index] = *ops;
}
43
44 /* Predicates */
45 #if !defined(CONFIG_USER_ONLY)
/*
 * Check Smstateen access permission for the state selected by @bit in
 * stateen CSR group @index.
 *
 * Returns RISCV_EXCP_NONE when the access is allowed, otherwise the
 * exception to raise: illegal instruction when the denial comes from
 * mstateen/sstateen, or a virtual instruction fault when it comes from
 * a hypervisor-controlled bit while V=1.
 */
RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit)
{
    bool virt = env->virt_enabled;

    /* M-mode is never gated; neither is anything if Smstateen is absent. */
    if (env->priv == PRV_M || !riscv_cpu_cfg(env)->ext_smstateen) {
        return RISCV_EXCP_NONE;
    }

    /* mstateen gates all privilege levels below M. */
    if (!(env->mstateen[index] & bit)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (virt) {
        /* hstateen gates VS/VU-mode accesses. */
        if (!(env->hstateen[index] & bit)) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }

        /* sstateen additionally gates VU-mode. */
        if (env->priv == PRV_U && !(env->sstateen[index] & bit)) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    /* Non-virtualized U-mode under an S-mode OS is gated by sstateen. */
    if (env->priv == PRV_U && riscv_has_ext(env, RVS)) {
        if (!(env->sstateen[index] & bit)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }

    return RISCV_EXCP_NONE;
}
76 #endif
77
/*
 * Predicate for the floating-point CSRs (fflags/frm/fcsr): requires FP
 * state to be enabled unless Zfinx is implemented; with FP state off,
 * access is further gated by the Smstateen FCSR bit.
 */
static RISCVException fs(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
    /* Debugger accesses bypass the mstatus.FS enable check. */
    if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
        !riscv_cpu_cfg(env)->ext_zfinx) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
        /* Zfinx without FP state: still subject to Smstateen FCSR gating. */
        return smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR);
    }
#endif
    return RISCV_EXCP_NONE;
}
92
vs(CPURISCVState * env,int csrno)93 static RISCVException vs(CPURISCVState *env, int csrno)
94 {
95 if (riscv_cpu_cfg(env)->ext_zve32x) {
96 #if !defined(CONFIG_USER_ONLY)
97 if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
98 return RISCV_EXCP_ILLEGAL_INST;
99 }
100 #endif
101 return RISCV_EXCP_NONE;
102 }
103 return RISCV_EXCP_ILLEGAL_INST;
104 }
105
ctr(CPURISCVState * env,int csrno)106 static RISCVException ctr(CPURISCVState *env, int csrno)
107 {
108 #if !defined(CONFIG_USER_ONLY)
109 RISCVCPU *cpu = env_archcpu(env);
110 int ctr_index;
111 target_ulong ctr_mask;
112 int base_csrno = CSR_CYCLE;
113 bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;
114
115 if (rv32 && csrno >= CSR_CYCLEH) {
116 /* Offset for RV32 hpmcounternh counters */
117 base_csrno += 0x80;
118 }
119 ctr_index = csrno - base_csrno;
120 ctr_mask = BIT(ctr_index);
121
122 if ((csrno >= CSR_CYCLE && csrno <= CSR_INSTRET) ||
123 (csrno >= CSR_CYCLEH && csrno <= CSR_INSTRETH)) {
124 if (!riscv_cpu_cfg(env)->ext_zicntr) {
125 return RISCV_EXCP_ILLEGAL_INST;
126 }
127
128 goto skip_ext_pmu_check;
129 }
130
131 if (!(cpu->pmu_avail_ctrs & ctr_mask)) {
132 /* No counter is enabled in PMU or the counter is out of range */
133 return RISCV_EXCP_ILLEGAL_INST;
134 }
135
136 skip_ext_pmu_check:
137
138 if (env->debugger) {
139 return RISCV_EXCP_NONE;
140 }
141
142 if (env->priv < PRV_M && !get_field(env->mcounteren, ctr_mask)) {
143 return RISCV_EXCP_ILLEGAL_INST;
144 }
145
146 if (env->virt_enabled) {
147 if (!get_field(env->hcounteren, ctr_mask) ||
148 (env->priv == PRV_U && !get_field(env->scounteren, ctr_mask))) {
149 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
150 }
151 }
152
153 if (riscv_has_ext(env, RVS) && env->priv == PRV_U &&
154 !get_field(env->scounteren, ctr_mask)) {
155 return RISCV_EXCP_ILLEGAL_INST;
156 }
157
158 #endif
159 return RISCV_EXCP_NONE;
160 }
161
ctr32(CPURISCVState * env,int csrno)162 static RISCVException ctr32(CPURISCVState *env, int csrno)
163 {
164 if (riscv_cpu_mxl(env) != MXL_RV32) {
165 return RISCV_EXCP_ILLEGAL_INST;
166 }
167
168 return ctr(env, csrno);
169 }
170
/*
 * Predicate for the jvt CSR: requires the Zcmt extension plus, on
 * system emulation, the Smstateen JVT access permission.
 */
static RISCVException zcmt(CPURISCVState *env, int csrno)
{
    if (!riscv_cpu_cfg(env)->ext_zcmt) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

#if !defined(CONFIG_USER_ONLY)
    RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_JVT);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }
#endif

    return RISCV_EXCP_NONE;
}
186
/*
 * Predicate for the shadow-stack pointer CSR (ssp): requires Zicfiss;
 * outside M-mode, backward-edge CFI must be active for the current
 * mode, except for debugger accesses which are always permitted.
 */
static RISCVException cfi_ss(CPURISCVState *env, int csrno)
{
    if (!env_archcpu(env)->cfg.ext_zicfiss) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* If ext implemented, M-mode always have access to SSP CSR */
    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    }

    /* if bcfi not active for current env, access to csr is illegal */
    if (!cpu_get_bcfien(env)) {
#if !defined(CONFIG_USER_ONLY)
        if (env->debugger) {
            return RISCV_EXCP_NONE;
        }
#endif
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}
210
211 #if !defined(CONFIG_USER_ONLY)
/*
 * Predicate for the machine hpm counters (mhpmcounter3..31 and the
 * RV32 high halves): the counter must be implemented by the PMU.
 */
static RISCVException mctr(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);
    uint32_t pmu_avail_ctrs = cpu->pmu_avail_ctrs;
    int ctr_index;
    int base_csrno = CSR_MHPMCOUNTER3;

    if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) {
        /* Offset for RV32 mhpmcounternh counters */
        csrno -= 0x80;
    }

    g_assert(csrno >= CSR_MHPMCOUNTER3 && csrno <= CSR_MHPMCOUNTER31);

    ctr_index = csrno - base_csrno;
    /*
     * ctr_index 0 corresponds to counter 3; shift pmu_avail_ctrs down
     * by 3 so its bit numbering lines up with ctr_index.
     */
    if ((BIT(ctr_index) & pmu_avail_ctrs >> 3) == 0) {
        /* The PMU is not enabled or counter is out of range */
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}
234
mctr32(CPURISCVState * env,int csrno)235 static RISCVException mctr32(CPURISCVState *env, int csrno)
236 {
237 if (riscv_cpu_mxl(env) != MXL_RV32) {
238 return RISCV_EXCP_ILLEGAL_INST;
239 }
240
241 return mctr(env, csrno);
242 }
243
sscofpmf(CPURISCVState * env,int csrno)244 static RISCVException sscofpmf(CPURISCVState *env, int csrno)
245 {
246 if (!riscv_cpu_cfg(env)->ext_sscofpmf) {
247 return RISCV_EXCP_ILLEGAL_INST;
248 }
249
250 return RISCV_EXCP_NONE;
251 }
252
sscofpmf_32(CPURISCVState * env,int csrno)253 static RISCVException sscofpmf_32(CPURISCVState *env, int csrno)
254 {
255 if (riscv_cpu_mxl(env) != MXL_RV32) {
256 return RISCV_EXCP_ILLEGAL_INST;
257 }
258
259 return sscofpmf(env, csrno);
260 }
261
smcntrpmf(CPURISCVState * env,int csrno)262 static RISCVException smcntrpmf(CPURISCVState *env, int csrno)
263 {
264 if (!riscv_cpu_cfg(env)->ext_smcntrpmf) {
265 return RISCV_EXCP_ILLEGAL_INST;
266 }
267
268 return RISCV_EXCP_NONE;
269 }
270
smcntrpmf_32(CPURISCVState * env,int csrno)271 static RISCVException smcntrpmf_32(CPURISCVState *env, int csrno)
272 {
273 if (riscv_cpu_mxl(env) != MXL_RV32) {
274 return RISCV_EXCP_ILLEGAL_INST;
275 }
276
277 return smcntrpmf(env, csrno);
278 }
279
/* Predicate for CSRs that are accessible from any privilege level. */
static RISCVException any(CPURISCVState *env, int csrno)
{
    return RISCV_EXCP_NONE;
}
284
any32(CPURISCVState * env,int csrno)285 static RISCVException any32(CPURISCVState *env, int csrno)
286 {
287 if (riscv_cpu_mxl(env) != MXL_RV32) {
288 return RISCV_EXCP_ILLEGAL_INST;
289 }
290
291 return any(env, csrno);
292
293 }
294
aia_any(CPURISCVState * env,int csrno)295 static RISCVException aia_any(CPURISCVState *env, int csrno)
296 {
297 if (!riscv_cpu_cfg(env)->ext_smaia) {
298 return RISCV_EXCP_ILLEGAL_INST;
299 }
300
301 return any(env, csrno);
302 }
303
aia_any32(CPURISCVState * env,int csrno)304 static RISCVException aia_any32(CPURISCVState *env, int csrno)
305 {
306 if (!riscv_cpu_cfg(env)->ext_smaia) {
307 return RISCV_EXCP_ILLEGAL_INST;
308 }
309
310 return any32(env, csrno);
311 }
312
smode(CPURISCVState * env,int csrno)313 static RISCVException smode(CPURISCVState *env, int csrno)
314 {
315 if (riscv_has_ext(env, RVS)) {
316 return RISCV_EXCP_NONE;
317 }
318
319 return RISCV_EXCP_ILLEGAL_INST;
320 }
321
smode32(CPURISCVState * env,int csrno)322 static RISCVException smode32(CPURISCVState *env, int csrno)
323 {
324 if (riscv_cpu_mxl(env) != MXL_RV32) {
325 return RISCV_EXCP_ILLEGAL_INST;
326 }
327
328 return smode(env, csrno);
329 }
330
aia_smode(CPURISCVState * env,int csrno)331 static RISCVException aia_smode(CPURISCVState *env, int csrno)
332 {
333 if (!riscv_cpu_cfg(env)->ext_ssaia) {
334 return RISCV_EXCP_ILLEGAL_INST;
335 }
336
337 return smode(env, csrno);
338 }
339
aia_smode32(CPURISCVState * env,int csrno)340 static RISCVException aia_smode32(CPURISCVState *env, int csrno)
341 {
342 if (!riscv_cpu_cfg(env)->ext_ssaia) {
343 return RISCV_EXCP_ILLEGAL_INST;
344 }
345
346 return smode32(env, csrno);
347 }
348
hmode(CPURISCVState * env,int csrno)349 static RISCVException hmode(CPURISCVState *env, int csrno)
350 {
351 if (riscv_has_ext(env, RVH)) {
352 return RISCV_EXCP_NONE;
353 }
354
355 return RISCV_EXCP_ILLEGAL_INST;
356 }
357
hmode32(CPURISCVState * env,int csrno)358 static RISCVException hmode32(CPURISCVState *env, int csrno)
359 {
360 if (riscv_cpu_mxl(env) != MXL_RV32) {
361 return RISCV_EXCP_ILLEGAL_INST;
362 }
363
364 return hmode(env, csrno);
365
366 }
367
umode(CPURISCVState * env,int csrno)368 static RISCVException umode(CPURISCVState *env, int csrno)
369 {
370 if (riscv_has_ext(env, RVU)) {
371 return RISCV_EXCP_NONE;
372 }
373
374 return RISCV_EXCP_ILLEGAL_INST;
375 }
376
umode32(CPURISCVState * env,int csrno)377 static RISCVException umode32(CPURISCVState *env, int csrno)
378 {
379 if (riscv_cpu_mxl(env) != MXL_RV32) {
380 return RISCV_EXCP_ILLEGAL_INST;
381 }
382
383 return umode(env, csrno);
384 }
385
mstateen(CPURISCVState * env,int csrno)386 static RISCVException mstateen(CPURISCVState *env, int csrno)
387 {
388 if (!riscv_cpu_cfg(env)->ext_smstateen) {
389 return RISCV_EXCP_ILLEGAL_INST;
390 }
391
392 return any(env, csrno);
393 }
394
/*
 * Common predicate for the hstateen0..3 / hstateen0h..3h CSRs:
 * requires Smstateen and the H extension; below M-mode, the matching
 * mstateen[n].SE bit must delegate the register.  @base is the first
 * CSR number of the group, used to index into mstateen[].
 */
static RISCVException hstateen_pred(CPURISCVState *env, int csrno, int base)
{
    if (!riscv_cpu_cfg(env)->ext_smstateen) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    RISCVException ret = hmode(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* Debugger accesses bypass the delegation check. */
    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    if (env->priv < PRV_M) {
        if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEEN)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }

    return RISCV_EXCP_NONE;
}
418
/* Predicate for the low halves hstateen0..3. */
static RISCVException hstateen(CPURISCVState *env, int csrno)
{
    return hstateen_pred(env, csrno, CSR_HSTATEEN0);
}
423
/* Predicate for the RV32 high halves hstateen0h..3h. */
static RISCVException hstateenh(CPURISCVState *env, int csrno)
{
    return hstateen_pred(env, csrno, CSR_HSTATEEN0H);
}
428
/*
 * Predicate for the sstateen0..3 CSRs: requires Smstateen and the S
 * extension; below M-mode the matching mstateen[n].SE bit must
 * delegate the register, and under virtualization hstateen[n].SE must
 * delegate it as well (denial there is a virtual instruction fault).
 */
static RISCVException sstateen(CPURISCVState *env, int csrno)
{
    bool virt = env->virt_enabled;
    int index = csrno - CSR_SSTATEEN0;

    if (!riscv_cpu_cfg(env)->ext_smstateen) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    RISCVException ret = smode(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* Debugger accesses bypass the delegation checks. */
    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    if (env->priv < PRV_M) {
        if (!(env->mstateen[index] & SMSTATEEN_STATEEN)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }

        if (virt) {
            if (!(env->hstateen[index] & SMSTATEEN_STATEEN)) {
                return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
            }
        }
    }

    return RISCV_EXCP_NONE;
}
461
/*
 * Predicate for the Sstc stimecmp/vstimecmp CSRs (and RV32 high
 * halves): requires the Sstc extension and a timer backend, the right
 * privilege (H for vstimecmp, S for stimecmp), and - below M-mode -
 * both mcounteren.TM and menvcfg.STCE (plus the hcounteren/henvcfg
 * equivalents when virtualized).
 */
static RISCVException sstc(CPURISCVState *env, int csrno)
{
    bool hmode_check = false;

    /* Sstc is unusable without a timer (rdtime_fn) to back it. */
    if (!riscv_cpu_cfg(env)->ext_sstc || !env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* vstimecmp/vstimecmph are hypervisor CSRs; stimecmp* are S-mode. */
    if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) {
        hmode_check = true;
    }

    RISCVException ret = hmode_check ? hmode(env, csrno) : smode(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    }

    /*
     * No need of separate function for rv32 as menvcfg stores both menvcfg
     * menvcfgh for RV32.
     */
    if (!(get_field(env->mcounteren, COUNTEREN_TM) &&
          get_field(env->menvcfg, MENVCFG_STCE))) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (env->virt_enabled) {
        if (!(get_field(env->hcounteren, COUNTEREN_TM) &&
              get_field(env->henvcfg, HENVCFG_STCE))) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    return RISCV_EXCP_NONE;
}
505
sstc_32(CPURISCVState * env,int csrno)506 static RISCVException sstc_32(CPURISCVState *env, int csrno)
507 {
508 if (riscv_cpu_mxl(env) != MXL_RV32) {
509 return RISCV_EXCP_ILLEGAL_INST;
510 }
511
512 return sstc(env, csrno);
513 }
514
satp(CPURISCVState * env,int csrno)515 static RISCVException satp(CPURISCVState *env, int csrno)
516 {
517 if (env->priv == PRV_S && !env->virt_enabled &&
518 get_field(env->mstatus, MSTATUS_TVM)) {
519 return RISCV_EXCP_ILLEGAL_INST;
520 }
521 if (env->priv == PRV_S && env->virt_enabled &&
522 get_field(env->hstatus, HSTATUS_VTVM)) {
523 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
524 }
525
526 return smode(env, csrno);
527 }
528
/* Predicate for hgatp: HS-mode access is trapped by mstatus.TVM. */
static RISCVException hgatp(CPURISCVState *env, int csrno)
{
    if (env->priv == PRV_S && !env->virt_enabled &&
        get_field(env->mstatus, MSTATUS_TVM)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return hmode(env, csrno);
}
538
539 /* Checks if PointerMasking registers could be accessed */
pointer_masking(CPURISCVState * env,int csrno)540 static RISCVException pointer_masking(CPURISCVState *env, int csrno)
541 {
542 /* Check if j-ext is present */
543 if (riscv_has_ext(env, RVJ)) {
544 return RISCV_EXCP_NONE;
545 }
546 return RISCV_EXCP_ILLEGAL_INST;
547 }
548
aia_hmode(CPURISCVState * env,int csrno)549 static RISCVException aia_hmode(CPURISCVState *env, int csrno)
550 {
551 if (!riscv_cpu_cfg(env)->ext_ssaia) {
552 return RISCV_EXCP_ILLEGAL_INST;
553 }
554
555 return hmode(env, csrno);
556 }
557
aia_hmode32(CPURISCVState * env,int csrno)558 static RISCVException aia_hmode32(CPURISCVState *env, int csrno)
559 {
560 if (!riscv_cpu_cfg(env)->ext_ssaia) {
561 return RISCV_EXCP_ILLEGAL_INST;
562 }
563
564 return hmode32(env, csrno);
565 }
566
pmp(CPURISCVState * env,int csrno)567 static RISCVException pmp(CPURISCVState *env, int csrno)
568 {
569 if (riscv_cpu_cfg(env)->pmp) {
570 if (csrno <= CSR_PMPCFG3) {
571 uint32_t reg_index = csrno - CSR_PMPCFG0;
572
573 /* TODO: RV128 restriction check */
574 if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
575 return RISCV_EXCP_ILLEGAL_INST;
576 }
577 }
578
579 return RISCV_EXCP_NONE;
580 }
581
582 return RISCV_EXCP_ILLEGAL_INST;
583 }
584
have_mseccfg(CPURISCVState * env,int csrno)585 static RISCVException have_mseccfg(CPURISCVState *env, int csrno)
586 {
587 if (riscv_cpu_cfg(env)->ext_smepmp) {
588 return RISCV_EXCP_NONE;
589 }
590 if (riscv_cpu_cfg(env)->ext_zkr) {
591 return RISCV_EXCP_NONE;
592 }
593
594 return RISCV_EXCP_ILLEGAL_INST;
595 }
596
debug(CPURISCVState * env,int csrno)597 static RISCVException debug(CPURISCVState *env, int csrno)
598 {
599 if (riscv_cpu_cfg(env)->debug) {
600 return RISCV_EXCP_NONE;
601 }
602
603 return RISCV_EXCP_ILLEGAL_INST;
604 }
605 #endif
606
/*
 * Predicate for the Zkr seed CSR.  Access policy: always allowed in
 * M-mode (and for the debugger); never allowed from virtual modes;
 * allowed from HS/U only with the matching mseccfg SSEED/USEED bit.
 */
static RISCVException seed(CPURISCVState *env, int csrno)
{
    if (!riscv_cpu_cfg(env)->ext_zkr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    /*
     * With a CSR read-write instruction:
     * 1) The seed CSR is always available in machine mode as normal.
     * 2) Attempted access to seed from virtual modes VS and VU always raises
     * an exception(virtual instruction exception only if mseccfg.sseed=1).
     * 3) Without the corresponding access control bit set to 1, any attempted
     * access to seed from U, S or HS modes will raise an illegal instruction
     * exception.
     */
    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    } else if (env->virt_enabled) {
        if (env->mseccfg & MSECCFG_SSEED) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        } else {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    } else {
        if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) {
            return RISCV_EXCP_NONE;
        } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) {
            return RISCV_EXCP_NONE;
        } else {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }
#else
    return RISCV_EXCP_NONE;
#endif
}
648
/* zicfiss CSR_SSP read and write */

/* Read the shadow-stack pointer CSR. */
static int read_ssp(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = env->ssp;
    return RISCV_EXCP_NONE;
}
655
/* Write the shadow-stack pointer CSR. */
static int write_ssp(CPURISCVState *env, int csrno, target_ulong val)
{
    env->ssp = val;
    return RISCV_EXCP_NONE;
}
661
/* User Floating-Point CSRs */

/* Read fflags (accrued FP exception flags). */
static RISCVException read_fflags(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = riscv_cpu_get_fflags(env);
    return RISCV_EXCP_NONE;
}
669
/* Write fflags; marks FP state dirty (mstatus.FS) when F is present. */
static RISCVException write_fflags(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVF)) {
        env->mstatus |= MSTATUS_FS;
    }
#endif
    /* Only the accrued-exception bits are writable. */
    riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
    return RISCV_EXCP_NONE;
}
681
/* Read frm (dynamic FP rounding mode). */
static RISCVException read_frm(CPURISCVState *env, int csrno,
                               target_ulong *val)
{
    *val = env->frm;
    return RISCV_EXCP_NONE;
}
688
/* Write frm; marks FP state dirty (mstatus.FS) when F is present. */
static RISCVException write_frm(CPURISCVState *env, int csrno,
                                target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVF)) {
        env->mstatus |= MSTATUS_FS;
    }
#endif
    /* Only the rounding-mode field is writable. */
    env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
    return RISCV_EXCP_NONE;
}
700
/* Read fcsr: the combination of fflags and frm in their FSR fields. */
static RISCVException read_fcsr(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
          | (env->frm << FSR_RD_SHIFT);
    return RISCV_EXCP_NONE;
}
708
/* Write fcsr: splits the value into frm and fflags; dirties FP state. */
static RISCVException write_fcsr(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVF)) {
        env->mstatus |= MSTATUS_FS;
    }
#endif
    env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
    riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
    return RISCV_EXCP_NONE;
}
721
/*
 * Read vtype.  The vill flag occupies the MSB of the register, so it
 * is placed at bit 31 or 63 depending on the current effective XLEN.
 */
static RISCVException read_vtype(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    uint64_t vill;
    switch (env->xl) {
    case MXL_RV32:
        vill = (uint32_t)env->vill << 31;
        break;
    case MXL_RV64:
        vill = (uint64_t)env->vill << 63;
        break;
    default:
        g_assert_not_reached();
    }
    *val = (target_ulong)vill | env->vtype;
    return RISCV_EXCP_NONE;
}
739
/* Read vl (current vector length). */
static RISCVException read_vl(CPURISCVState *env, int csrno,
                              target_ulong *val)
{
    *val = env->vl;
    return RISCV_EXCP_NONE;
}
746
/* Read vlenb (vector register length in bytes, a config constant). */
static RISCVException read_vlenb(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->vlenb;
    return RISCV_EXCP_NONE;
}
753
/* Read vxrm (fixed-point rounding mode). */
static RISCVException read_vxrm(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->vxrm;
    return RISCV_EXCP_NONE;
}
760
/* Write vxrm; marks vector state dirty (mstatus.VS). */
static RISCVException write_vxrm(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxrm = val;
    return RISCV_EXCP_NONE;
}
770
/* Read vxsat (fixed-point saturation flag); only bit 0 is defined. */
static RISCVException read_vxsat(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vxsat & BIT(0);
    return RISCV_EXCP_NONE;
}
777
/* Write vxsat (bit 0 only); marks vector state dirty (mstatus.VS). */
static RISCVException write_vxsat(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxsat = val & BIT(0);
    return RISCV_EXCP_NONE;
}
787
/* Read vstart (index of the first element to execute). */
static RISCVException read_vstart(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstart;
    return RISCV_EXCP_NONE;
}
794
/* Write vstart; marks vector state dirty (mstatus.VS). */
static RISCVException write_vstart(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    /*
     * The vstart CSR is defined to have only enough writable bits
     * to hold the largest element index, i.e. lg2(VLEN) bits.
     * VLEN in bits is vlenb << 3.
     */
    env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlenb << 3));
    return RISCV_EXCP_NONE;
}
808
/* Read vcsr: vxrm and vxsat combined into their VCSR fields. */
static RISCVException read_vcsr(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
    return RISCV_EXCP_NONE;
}
815
/* Write vcsr: splits into vxrm and vxsat; dirties vector state. */
static RISCVException write_vcsr(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
    env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
    return RISCV_EXCP_NONE;
}
826
827 #if defined(CONFIG_USER_ONLY)
828 /* User Timers and Counters */
get_ticks(bool shift)829 static target_ulong get_ticks(bool shift)
830 {
831 int64_t val = cpu_get_host_ticks();
832 target_ulong result = shift ? val >> 32 : val;
833
834 return result;
835 }
836
/* User-only: time CSR reads the host tick counter directly. */
static RISCVException read_time(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = cpu_get_host_ticks();
    return RISCV_EXCP_NONE;
}
843
/* User-only: timeh CSR reads the upper half of the host tick counter. */
static RISCVException read_timeh(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = cpu_get_host_ticks() >> 32;
    return RISCV_EXCP_NONE;
}
850
/* User-only: hpm counters all read the host tick counter (low half). */
static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = get_ticks(false);
    return RISCV_EXCP_NONE;
}
857
/* User-only: high halves of the hpm counters (RV32). */
static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
                                       target_ulong *val)
{
    *val = get_ticks(true);
    return RISCV_EXCP_NONE;
}
864
865 #else /* CONFIG_USER_ONLY */
866
/* Read mcyclecfg (Smcntrpmf cycle-counter configuration). */
static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->mcyclecfg;
    return RISCV_EXCP_NONE;
}
873
/*
 * Write mcyclecfg (Smcntrpmf).  On RV64, only the xINH filter bits for
 * privilege modes that actually exist are kept writable; on RV32 the
 * raw low half is stored (the filter bits live in mcyclecfgh).
 */
static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    uint64_t inh_avail_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->mcyclecfg = val;
    } else {
        /* Set xINH fields if priv mode supported */
        inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MCYCLECFG_BIT_MINH;
        inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFG_BIT_UINH : 0;
        inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFG_BIT_SINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVU)) ? MCYCLECFG_BIT_VUINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVS)) ? MCYCLECFG_BIT_VSINH : 0;
        env->mcyclecfg = val & inh_avail_mask;
    }

    return RISCV_EXCP_NONE;
}
895
/* Read mcyclecfgh (RV32 high half of mcyclecfg). */
static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->mcyclecfgh;
    return RISCV_EXCP_NONE;
}
902
/*
 * Write mcyclecfgh (RV32): only the xINH filter bits for privilege
 * modes that exist on this CPU are writable.
 */
static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
                                                  MCYCLECFGH_BIT_MINH);

    /* Set xINH fields if priv mode supported */
    inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFGH_BIT_UINH : 0;
    inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFGH_BIT_SINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVU)) ? MCYCLECFGH_BIT_VUINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVS)) ? MCYCLECFGH_BIT_VSINH : 0;

    env->mcyclecfgh = val & inh_avail_mask;
    return RISCV_EXCP_NONE;
}
920
/* Read minstretcfg (Smcntrpmf instret-counter configuration). */
static RISCVException read_minstretcfg(CPURISCVState *env, int csrno,
                                       target_ulong *val)
{
    *val = env->minstretcfg;
    return RISCV_EXCP_NONE;
}
927
/*
 * Write minstretcfg (Smcntrpmf).  Mirrors write_mcyclecfg(): RV64
 * masks the xINH bits down to the privilege modes that exist; RV32
 * stores the raw low half (filter bits live in minstretcfgh).
 */
static RISCVException write_minstretcfg(CPURISCVState *env, int csrno,
                                        target_ulong val)
{
    uint64_t inh_avail_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->minstretcfg = val;
    } else {
        /* Set xINH fields if priv mode supported */
        inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MINSTRETCFG_BIT_MINH;
        inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFG_BIT_UINH : 0;
        inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFG_BIT_SINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVU)) ? MINSTRETCFG_BIT_VUINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVS)) ? MINSTRETCFG_BIT_VSINH : 0;
        env->minstretcfg = val & inh_avail_mask;
    }
    return RISCV_EXCP_NONE;
}
947
/* Read minstretcfgh (RV32 high half of minstretcfg). */
static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno,
                                        target_ulong *val)
{
    *val = env->minstretcfgh;
    return RISCV_EXCP_NONE;
}
954
/*
 * Write minstretcfgh (RV32): only the xINH filter bits for privilege
 * modes that exist on this CPU are writable.
 */
static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno,
                                         target_ulong val)
{
    target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
                                                  MINSTRETCFGH_BIT_MINH);

    /* Set xINH fields if priv mode supported */
    inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFGH_BIT_UINH : 0;
    inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFGH_BIT_SINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVU)) ? MINSTRETCFGH_BIT_VUINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVS)) ? MINSTRETCFGH_BIT_VSINH : 0;

    env->minstretcfgh = val & inh_avail_mask;
    return RISCV_EXCP_NONE;
}
971
/*
 * Read mhpmevent3..31.  The index is derived from CSR_MCOUNTINHIBIT
 * so that mhpmevent3 maps to mhpmevent_val[3].
 */
static RISCVException read_mhpmevent(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    int evt_index = csrno - CSR_MCOUNTINHIBIT;

    *val = env->mhpmevent_val[evt_index];

    return RISCV_EXCP_NONE;
}
981
/*
 * Write mhpmevent3..31.  On RV64 the Sscofpmf xINH filter bits are
 * masked down to the privilege modes that exist; on RV32 the raw low
 * half is stored and combined with mhpmeventh for the 64-bit event
 * value.  The PMU event map is refreshed with the combined value.
 */
static RISCVException write_mhpmevent(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    int evt_index = csrno - CSR_MCOUNTINHIBIT;
    uint64_t mhpmevt_val = val;
    uint64_t inh_avail_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->mhpmevent_val[evt_index] = val;
        /* Combine with the already-written high half for the PMU. */
        mhpmevt_val = mhpmevt_val |
                      ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
    } else {
        /* Keep only the xINH bits for privilege modes that exist. */
        inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MHPMEVENT_BIT_MINH;
        inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENT_BIT_UINH : 0;
        inh_avail_mask |= riscv_has_ext(env, RVS) ? MHPMEVENT_BIT_SINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVU)) ? MHPMEVENT_BIT_VUINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVS)) ? MHPMEVENT_BIT_VSINH : 0;
        mhpmevt_val = val & inh_avail_mask;
        env->mhpmevent_val[evt_index] = mhpmevt_val;
    }

    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}
1009
/*
 * Read mhpmevent3h..31h (RV32 high halves); +3 maps mhpmevent3h onto
 * mhpmeventh_val[3].
 */
static RISCVException read_mhpmeventh(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    int evt_index = csrno - CSR_MHPMEVENT3H + 3;

    *val = env->mhpmeventh_val[evt_index];

    return RISCV_EXCP_NONE;
}
1019
/*
 * Write mhpmevent3h..31h (RV32).  The xINH filter bits are masked to
 * the privilege modes that exist, the stored high half is combined
 * with the low half, and the PMU event map is refreshed.
 */
static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    int evt_index = csrno - CSR_MHPMEVENT3H + 3;
    uint64_t mhpmevth_val;
    uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];
    target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
                                                  MHPMEVENTH_BIT_MINH);

    inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENTH_BIT_UINH : 0;
    inh_avail_mask |= riscv_has_ext(env, RVS) ? MHPMEVENTH_BIT_SINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVU)) ? MHPMEVENTH_BIT_VUINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVS)) ? MHPMEVENTH_BIT_VSINH : 0;

    mhpmevth_val = val & inh_avail_mask;
    /* Rebuild the full 64-bit event value for the PMU. */
    mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
    env->mhpmeventh_val[evt_index] = mhpmevth_val;

    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}
1044
/*
 * Return the current value of fixed counter @counter_idx (0 = cycle,
 * 2 = instret), honouring the Smcntrpmf/Sscofpmf inhibit filter bits.
 * With no filter bits set, the raw icount/host-tick value is returned
 * directly; otherwise the per-privilege snapshots are summed for the
 * modes that are not inhibited.  @upper_half selects the high 32 bits
 * on RV32.
 */
static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env,
                                                         int counter_idx,
                                                         bool upper_half)
{
    int inst = riscv_pmu_ctr_monitor_instructions(env, counter_idx);
    uint64_t *counter_arr_virt = env->pmu_fixed_ctrs[inst].counter_virt;
    uint64_t *counter_arr = env->pmu_fixed_ctrs[inst].counter;
    target_ulong result = 0;
    uint64_t curr_val = 0;
    uint64_t cfg_val = 0;

    /* Pick the filter configuration matching this counter. */
    if (counter_idx == 0) {
        cfg_val = upper_half ? ((uint64_t)env->mcyclecfgh << 32) :
                  env->mcyclecfg;
    } else if (counter_idx == 2) {
        cfg_val = upper_half ? ((uint64_t)env->minstretcfgh << 32) :
                  env->minstretcfg;
    } else {
        cfg_val = upper_half ?
                  ((uint64_t)env->mhpmeventh_val[counter_idx] << 32) :
                  env->mhpmevent_val[counter_idx];
        cfg_val &= MHPMEVENT_FILTER_MASK;
    }

    /* No filtering requested: return the raw tick source. */
    if (!cfg_val) {
        if (icount_enabled()) {
            curr_val = inst ? icount_get_raw() : icount_get();
        } else {
            curr_val = cpu_get_host_ticks();
        }

        goto done;
    }

    /* Update counter before reading. */
    riscv_pmu_update_fixed_ctrs(env, env->priv, env->virt_enabled);

    /* Sum the snapshots of every privilege mode that is not inhibited. */
    if (!(cfg_val & MCYCLECFG_BIT_MINH)) {
        curr_val += counter_arr[PRV_M];
    }

    if (!(cfg_val & MCYCLECFG_BIT_SINH)) {
        curr_val += counter_arr[PRV_S];
    }

    if (!(cfg_val & MCYCLECFG_BIT_UINH)) {
        curr_val += counter_arr[PRV_U];
    }

    if (!(cfg_val & MCYCLECFG_BIT_VSINH)) {
        curr_val += counter_arr_virt[PRV_S];
    }

    if (!(cfg_val & MCYCLECFG_BIT_VUINH)) {
        curr_val += counter_arr_virt[PRV_U];
    }

done:
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        result = upper_half ? curr_val >> 32 : curr_val;
    } else {
        result = curr_val;
    }

    return result;
}
1111
/*
 * Write mcycle/minstret/mhpmcounter3..31 (low half on RV32).
 *
 * If the counter is active (not inhibited and monitoring cycles or
 * instructions), snapshot the underlying source so future reads report
 * deltas relative to this write, and re-arm the overflow timer for
 * programmable counters (idx > 2).
 */
static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
                                        target_ulong val)
{
    int ctr_idx = csrno - CSR_MCYCLE;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = val;

    counter->mhpmcounter_val = val;
    if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
        (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
         riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
        /* Baseline snapshot of the source at the moment of the write. */
        counter->mhpmcounter_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
                                                                ctr_idx, false);
        if (ctr_idx > 2) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                /* Fold in the already-written high half on RV32. */
                mhpmctr_val = mhpmctr_val |
                              ((uint64_t)counter->mhpmcounterh_val << 32);
            }
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        /* Other counters can keep incrementing from the given value */
        counter->mhpmcounter_prev = val;
    }

    return RISCV_EXCP_NONE;
}
1139
/*
 * Write the RV32 high half mcycleh/minstreth/mhpmcounter3h..31h.
 * Mirrors write_mhpmcounter() for the upper 32 bits: snapshot the source
 * if the counter is active and re-arm the overflow timer for idx > 2.
 */
static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
                                         target_ulong val)
{
    int ctr_idx = csrno - CSR_MCYCLEH;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = counter->mhpmcounter_val;
    uint64_t mhpmctrh_val = val;

    counter->mhpmcounterh_val = val;
    /* Combine with the low half to get the full 64-bit programmed value. */
    mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
    if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
        (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
         riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
        counter->mhpmcounterh_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
                                                                 ctr_idx, true);
        if (ctr_idx > 2) {
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        /* Inhibited/unmonitored: track value directly, like the low half. */
        counter->mhpmcounterh_prev = val;
    }

    return RISCV_EXCP_NONE;
}
1164
/*
 * Read one half of a PMU counter.
 *
 * For an active cycle/instret-monitoring counter the returned value is
 * source_now - source_at_write + written_value, so guest software observes
 * a counter that advanced from the value it last programmed. Inhibited or
 * unmonitored counters simply return the stored value.
 */
RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
                                  bool upper_half, uint32_t ctr_idx)
{
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    target_ulong ctr_prev = upper_half ? counter->mhpmcounterh_prev :
                                         counter->mhpmcounter_prev;
    target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val :
                                        counter->mhpmcounter_val;

    if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
        /*
         * Counter should not increment if inhibit bit is set. Just return the
         * current counter value.
         */
        *val = ctr_val;
        return RISCV_EXCP_NONE;
    }

    /*
     * The kernel computes the perf delta by subtracting the current value from
     * the value it initialized previously (ctr_val).
     */
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        *val = riscv_pmu_ctr_get_fixed_counters_val(env, ctr_idx, upper_half) -
               ctr_prev + ctr_val;
    } else {
        *val = ctr_val;
    }

    return RISCV_EXCP_NONE;
}
1197
read_hpmcounter(CPURISCVState * env,int csrno,target_ulong * val)1198 static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
1199 target_ulong *val)
1200 {
1201 uint16_t ctr_index;
1202
1203 if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
1204 ctr_index = csrno - CSR_MCYCLE;
1205 } else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
1206 ctr_index = csrno - CSR_CYCLE;
1207 } else {
1208 return RISCV_EXCP_ILLEGAL_INST;
1209 }
1210
1211 return riscv_pmu_read_ctr(env, val, false, ctr_index);
1212 }
1213
read_hpmcounterh(CPURISCVState * env,int csrno,target_ulong * val)1214 static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
1215 target_ulong *val)
1216 {
1217 uint16_t ctr_index;
1218
1219 if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) {
1220 ctr_index = csrno - CSR_MCYCLEH;
1221 } else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) {
1222 ctr_index = csrno - CSR_CYCLEH;
1223 } else {
1224 return RISCV_EXCP_ILLEGAL_INST;
1225 }
1226
1227 return riscv_pmu_read_ctr(env, val, true, ctr_index);
1228 }
1229
/*
 * Read scountovf (Sscofpmf): one bit per hpmcounter whose event selector
 * has the overflow (OF) bit set. On RV32 the OF bit lives in mhpmeventXh.
 * Only counters enabled in mcounteren are reported.
 */
static RISCVException read_scountovf(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT;
    int i;
    *val = 0;
    target_ulong *mhpm_evt_val;
    uint64_t of_bit_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpm_evt_val = env->mhpmeventh_val;
        of_bit_mask = MHPMEVENTH_BIT_OF;
    } else {
        mhpm_evt_val = env->mhpmevent_val;
        of_bit_mask = MHPMEVENT_BIT_OF;
    }

    /* Counters 0..2 are fixed; programmable ones start at index 3. */
    for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) {
        if ((get_field(env->mcounteren, BIT(i))) &&
            (mhpm_evt_val[i] & of_bit_mask)) {
            *val |= BIT(i);
        }
    }

    return RISCV_EXCP_NONE;
}
1256
read_time(CPURISCVState * env,int csrno,target_ulong * val)1257 static RISCVException read_time(CPURISCVState *env, int csrno,
1258 target_ulong *val)
1259 {
1260 uint64_t delta = env->virt_enabled ? env->htimedelta : 0;
1261
1262 if (!env->rdtime_fn) {
1263 return RISCV_EXCP_ILLEGAL_INST;
1264 }
1265
1266 *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
1267 return RISCV_EXCP_NONE;
1268 }
1269
read_timeh(CPURISCVState * env,int csrno,target_ulong * val)1270 static RISCVException read_timeh(CPURISCVState *env, int csrno,
1271 target_ulong *val)
1272 {
1273 uint64_t delta = env->virt_enabled ? env->htimedelta : 0;
1274
1275 if (!env->rdtime_fn) {
1276 return RISCV_EXCP_ILLEGAL_INST;
1277 }
1278
1279 *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
1280 return RISCV_EXCP_NONE;
1281 }
1282
read_vstimecmp(CPURISCVState * env,int csrno,target_ulong * val)1283 static RISCVException read_vstimecmp(CPURISCVState *env, int csrno,
1284 target_ulong *val)
1285 {
1286 *val = env->vstimecmp;
1287
1288 return RISCV_EXCP_NONE;
1289 }
1290
read_vstimecmph(CPURISCVState * env,int csrno,target_ulong * val)1291 static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
1292 target_ulong *val)
1293 {
1294 *val = env->vstimecmp >> 32;
1295
1296 return RISCV_EXCP_NONE;
1297 }
1298
write_vstimecmp(CPURISCVState * env,int csrno,target_ulong val)1299 static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
1300 target_ulong val)
1301 {
1302 if (riscv_cpu_mxl(env) == MXL_RV32) {
1303 env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
1304 } else {
1305 env->vstimecmp = val;
1306 }
1307
1308 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
1309 env->htimedelta, MIP_VSTIP);
1310
1311 return RISCV_EXCP_NONE;
1312 }
1313
write_vstimecmph(CPURISCVState * env,int csrno,target_ulong val)1314 static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
1315 target_ulong val)
1316 {
1317 env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
1318 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
1319 env->htimedelta, MIP_VSTIP);
1320
1321 return RISCV_EXCP_NONE;
1322 }
1323
read_stimecmp(CPURISCVState * env,int csrno,target_ulong * val)1324 static RISCVException read_stimecmp(CPURISCVState *env, int csrno,
1325 target_ulong *val)
1326 {
1327 if (env->virt_enabled) {
1328 *val = env->vstimecmp;
1329 } else {
1330 *val = env->stimecmp;
1331 }
1332
1333 return RISCV_EXCP_NONE;
1334 }
1335
read_stimecmph(CPURISCVState * env,int csrno,target_ulong * val)1336 static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
1337 target_ulong *val)
1338 {
1339 if (env->virt_enabled) {
1340 *val = env->vstimecmp >> 32;
1341 } else {
1342 *val = env->stimecmp >> 32;
1343 }
1344
1345 return RISCV_EXCP_NONE;
1346 }
1347
write_stimecmp(CPURISCVState * env,int csrno,target_ulong val)1348 static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
1349 target_ulong val)
1350 {
1351 if (env->virt_enabled) {
1352 if (env->hvictl & HVICTL_VTI) {
1353 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1354 }
1355 return write_vstimecmp(env, csrno, val);
1356 }
1357
1358 if (riscv_cpu_mxl(env) == MXL_RV32) {
1359 env->stimecmp = deposit64(env->stimecmp, 0, 32, (uint64_t)val);
1360 } else {
1361 env->stimecmp = val;
1362 }
1363
1364 riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);
1365
1366 return RISCV_EXCP_NONE;
1367 }
1368
write_stimecmph(CPURISCVState * env,int csrno,target_ulong val)1369 static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
1370 target_ulong val)
1371 {
1372 if (env->virt_enabled) {
1373 if (env->hvictl & HVICTL_VTI) {
1374 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1375 }
1376 return write_vstimecmph(env, csrno, val);
1377 }
1378
1379 env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
1380 riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);
1381
1382 return RISCV_EXCP_NONE;
1383 }
1384
#define VSTOPI_NUM_SRCS 5

/*
 * All core local interrupts except the fixed ones 0:12. This macro is for
 * virtual interrupts logic so please don't change this to avoid messing up
 * the whole support, For reference see AIA spec: `5.3 Interrupt filtering and
 * virtual interrupts for supervisor level` and `6.3.2 Virtual interrupts for
 * VS level`.
 */
#define LOCAL_INTERRUPTS (~0x1FFFULL)

/* Interrupts M-mode may delegate to S-mode (writable bits of mideleg). */
static const uint64_t delegable_ints =
    S_MODE_INTERRUPTS | VS_MODE_INTERRUPTS | MIP_LCOFIP;
/* Interrupts HS-mode may delegate to VS-mode (writable bits of hideleg). */
static const uint64_t vs_delegable_ints =
    (VS_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & ~MIP_LCOFIP;
/* Every interrupt the hart can take; used as the global mie write mask. */
static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
                                 HS_MODE_INTERRUPTS | LOCAL_INTERRUPTS;
/* Exceptions M-mode may delegate to S-mode (writable bits of medeleg). */
#define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
                         (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_U_ECALL)) | \
                         (1ULL << (RISCV_EXCP_S_ECALL)) | \
                         (1ULL << (RISCV_EXCP_VS_ECALL)) | \
                         (1ULL << (RISCV_EXCP_M_ECALL)) | \
                         (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_SW_CHECK)) | \
                         (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
/*
 * Exceptions HS-mode may delegate onward to VS-mode (hedeleg): the
 * delegable set minus traps that must always be handled above VS.
 */
static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
    ~((1ULL << (RISCV_EXCP_S_ECALL)) |
      (1ULL << (RISCV_EXCP_VS_ECALL)) |
      (1ULL << (RISCV_EXCP_M_ECALL)) |
      (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
      (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
      (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
      (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
/* Bits of mstatus visible through the sstatus window (priv spec v1.10+). */
static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
    SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
    SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;

/*
 * Spec allows for bits 13:63 to be either read-only or writable.
 * So far we have interrupt LCOFIP in that region which is writable.
 *
 * Also, spec allows to inject virtual interrupts in this region even
 * without any hardware interrupts for that interrupt number.
 *
 * For now interrupt in 13:63 region are all kept writable. 13 being
 * LCOFIP and 14:63 being virtual only. Change this in future if we
 * introduce more interrupts that are not writable.
 */

/* Bit STIP can be an alias of mip.STIP that's why it's writable in mvip. */
static const uint64_t mvip_writable_mask = MIP_SSIP | MIP_STIP | MIP_SEIP |
                                    LOCAL_INTERRUPTS;
static const uint64_t mvien_writable_mask = MIP_SSIP | MIP_SEIP |
                                    LOCAL_INTERRUPTS;

static const uint64_t sip_writable_mask = SIP_SSIP | LOCAL_INTERRUPTS;
static const uint64_t hip_writable_mask = MIP_VSSIP;
static const uint64_t hvip_writable_mask = MIP_VSSIP | MIP_VSTIP |
                                    MIP_VSEIP | LOCAL_INTERRUPTS;
static const uint64_t hvien_writable_mask = LOCAL_INTERRUPTS;

static const uint64_t vsip_writable_mask = MIP_VSSIP | LOCAL_INTERRUPTS;

/* Legal satp/hgatp translation modes, indexed by the MODE field value. */
const bool valid_vm_1_10_32[16] = {
    [VM_1_10_MBARE] = true,
    [VM_1_10_SV32] = true
};

const bool valid_vm_1_10_64[16] = {
    [VM_1_10_MBARE] = true,
    [VM_1_10_SV39] = true,
    [VM_1_10_SV48] = true,
    [VM_1_10_SV57] = true
};
1472 /* Machine Information Registers */
read_zero(CPURISCVState * env,int csrno,target_ulong * val)1473 static RISCVException read_zero(CPURISCVState *env, int csrno,
1474 target_ulong *val)
1475 {
1476 *val = 0;
1477 return RISCV_EXCP_NONE;
1478 }
1479
write_ignore(CPURISCVState * env,int csrno,target_ulong val)1480 static RISCVException write_ignore(CPURISCVState *env, int csrno,
1481 target_ulong val)
1482 {
1483 return RISCV_EXCP_NONE;
1484 }
1485
read_mvendorid(CPURISCVState * env,int csrno,target_ulong * val)1486 static RISCVException read_mvendorid(CPURISCVState *env, int csrno,
1487 target_ulong *val)
1488 {
1489 *val = riscv_cpu_cfg(env)->mvendorid;
1490 return RISCV_EXCP_NONE;
1491 }
1492
read_marchid(CPURISCVState * env,int csrno,target_ulong * val)1493 static RISCVException read_marchid(CPURISCVState *env, int csrno,
1494 target_ulong *val)
1495 {
1496 *val = riscv_cpu_cfg(env)->marchid;
1497 return RISCV_EXCP_NONE;
1498 }
1499
read_mimpid(CPURISCVState * env,int csrno,target_ulong * val)1500 static RISCVException read_mimpid(CPURISCVState *env, int csrno,
1501 target_ulong *val)
1502 {
1503 *val = riscv_cpu_cfg(env)->mimpid;
1504 return RISCV_EXCP_NONE;
1505 }
1506
read_mhartid(CPURISCVState * env,int csrno,target_ulong * val)1507 static RISCVException read_mhartid(CPURISCVState *env, int csrno,
1508 target_ulong *val)
1509 {
1510 *val = env->mhartid;
1511 return RISCV_EXCP_NONE;
1512 }
1513
1514 /* Machine Trap Setup */
1515
1516 /* We do not store SD explicitly, only compute it on demand. */
/*
 * We do not store SD explicitly, only compute it on demand: SD is set
 * when any of FS/VS/XS reads as Dirty (all bits of the field set).
 */
static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
{
    bool dirty = (status & MSTATUS_FS) == MSTATUS_FS ||
                 (status & MSTATUS_VS) == MSTATUS_VS ||
                 (status & MSTATUS_XS) == MSTATUS_XS;

    if (!dirty) {
        return status;
    }

    switch (xl) {
    case MXL_RV32:
        return status | MSTATUS32_SD;
    case MXL_RV64:
        return status | MSTATUS64_SD;
    case MXL_RV128:
        /* For RV128 only the SD bit of the high word is reported here. */
        return MSTATUSH128_SD;
    default:
        g_assert_not_reached();
    }
}
1535
read_mstatus(CPURISCVState * env,int csrno,target_ulong * val)1536 static RISCVException read_mstatus(CPURISCVState *env, int csrno,
1537 target_ulong *val)
1538 {
1539 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
1540 return RISCV_EXCP_NONE;
1541 }
1542
validate_vm(CPURISCVState * env,target_ulong vm)1543 static bool validate_vm(CPURISCVState *env, target_ulong vm)
1544 {
1545 uint64_t mode_supported = riscv_cpu_cfg(env)->satp_mode.map;
1546 return get_field(mode_supported, (1 << vm));
1547 }
1548
/*
 * Legalize a write to satp/vsatp: accept it only when the requested MODE
 * is implemented and some relevant field actually changes, flushing the
 * TLB in that case; otherwise keep the old value (WARL behaviour).
 */
static target_ulong legalize_xatp(CPURISCVState *env, target_ulong old_xatp,
                                  target_ulong val)
{
    bool mode_ok;
    target_ulong changed;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mode_ok = validate_vm(env, get_field(val, SATP32_MODE));
        changed = (val ^ old_xatp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
    } else {
        mode_ok = validate_vm(env, get_field(val, SATP64_MODE));
        changed = (val ^ old_xatp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
    }

    if (!mode_ok || !changed) {
        return old_xatp;
    }

    /*
     * The ISA defines SATP.MODE=Bare as "no translation", but we still
     * pass these through QEMU's TLB emulation as it improves
     * performance. Flushing the TLB on SATP writes with paging
     * enabled avoids leaking those invalid cached mappings.
     */
    tlb_flush(env_cpu(env));
    return val;
}
1574
/*
 * Legalize the MPP field of an mstatus write (WARL): a privilege mode the
 * hart does not implement keeps the previous MPP value.
 */
static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp,
                                 target_ulong val)
{
    target_ulong new_mpp = get_field(val, MSTATUS_MPP);
    bool ok;

    switch (new_mpp) {
    case PRV_M:
        ok = true;
        break;
    case PRV_S:
        ok = riscv_has_ext(env, RVS);
        break;
    case PRV_U:
        ok = riscv_has_ext(env, RVU);
        break;
    default:
        ok = false;
        break;
    }

    return ok ? val : set_field(val, MSTATUS_MPP, old_mpp);
}
1600
/*
 * Write mstatus: legalize MPP, flush the TLB when a translation-affecting
 * bit (MXR) changes, then apply only the write mask appropriate for the
 * implemented extensions and current XLEN.
 */
static RISCVException write_mstatus(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus = env->mstatus;
    uint64_t mask = 0;
    RISCVMXL xl = riscv_cpu_mxl(env);

    /*
     * MPP field have been made WARL since priv version 1.11. However,
     * legalization for it will not break any software running on 1.10.
     */
    val = legalize_mpp(env, get_field(mstatus, MSTATUS_MPP), val);

    /* flush tlb on mstatus fields that affect VM */
    if ((val ^ mstatus) & MSTATUS_MXR) {
        tlb_flush(env_cpu(env));
    }
    mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
        MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
        MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
        MSTATUS_TW;

    /* FS/VS dirty-tracking fields exist only with the F/V extensions. */
    if (riscv_has_ext(env, RVF)) {
        mask |= MSTATUS_FS;
    }
    if (riscv_has_ext(env, RVV)) {
        mask |= MSTATUS_VS;
    }

    /* MPV/GVA/UXL live above bit 31 and are RV64-only (or debugger). */
    if (xl != MXL_RV32 || env->debugger) {
        if (riscv_has_ext(env, RVH)) {
            mask |= MSTATUS_MPV | MSTATUS_GVA;
        }
        if ((val & MSTATUS64_UXL) != 0) {
            mask |= MSTATUS64_UXL;
        }
    }

    /* If cfi lp extension is available, then apply cfi lp mask */
    if (env_archcpu(env)->cfg.ext_zicfilp) {
        mask |= (MSTATUS_MPELP | MSTATUS_SPELP);
    }

    mstatus = (mstatus & ~mask) | (val & mask);

    env->mstatus = mstatus;

    /*
     * Except in debug mode, UXL/SXL can only be modified by higher
     * privilege mode. So xl will not be changed in normal mode.
     */
    if (env->debugger) {
        env->xl = cpu_recompute_xl(env);
    }

    riscv_cpu_update_mask(env);
    return RISCV_EXCP_NONE;
}
1659
read_mstatush(CPURISCVState * env,int csrno,target_ulong * val)1660 static RISCVException read_mstatush(CPURISCVState *env, int csrno,
1661 target_ulong *val)
1662 {
1663 *val = env->mstatus >> 32;
1664 return RISCV_EXCP_NONE;
1665 }
1666
write_mstatush(CPURISCVState * env,int csrno,target_ulong val)1667 static RISCVException write_mstatush(CPURISCVState *env, int csrno,
1668 target_ulong val)
1669 {
1670 uint64_t valh = (uint64_t)val << 32;
1671 uint64_t mask = riscv_has_ext(env, RVH) ? MSTATUS_MPV | MSTATUS_GVA : 0;
1672
1673 env->mstatus = (env->mstatus & ~mask) | (valh & mask);
1674
1675 return RISCV_EXCP_NONE;
1676 }
1677
read_mstatus_i128(CPURISCVState * env,int csrno,Int128 * val)1678 static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
1679 Int128 *val)
1680 {
1681 *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128,
1682 env->mstatus));
1683 return RISCV_EXCP_NONE;
1684 }
1685
read_misa_i128(CPURISCVState * env,int csrno,Int128 * val)1686 static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
1687 Int128 *val)
1688 {
1689 *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
1690 return RISCV_EXCP_NONE;
1691 }
1692
read_misa(CPURISCVState * env,int csrno,target_ulong * val)1693 static RISCVException read_misa(CPURISCVState *env, int csrno,
1694 target_ulong *val)
1695 {
1696 target_ulong misa;
1697
1698 switch (env->misa_mxl) {
1699 case MXL_RV32:
1700 misa = (target_ulong)MXL_RV32 << 30;
1701 break;
1702 #ifdef TARGET_RISCV64
1703 case MXL_RV64:
1704 misa = (target_ulong)MXL_RV64 << 62;
1705 break;
1706 #endif
1707 default:
1708 g_assert_not_reached();
1709 }
1710
1711 *val = misa | env->misa_ext;
1712 return RISCV_EXCP_NONE;
1713 }
1714
/*
 * Write misa (only when the misa_w CPU option enables it): mask to
 * supported extensions, validate the resulting combination, and roll back
 * if the new set is inconsistent. A successful change flushes the TB cache.
 */
static RISCVException write_misa(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    RISCVCPU *cpu = env_archcpu(env);
    uint32_t orig_misa_ext = env->misa_ext;
    Error *local_err = NULL;

    if (!riscv_cpu_cfg(env)->misa_w) {
        /* drop write to misa */
        return RISCV_EXCP_NONE;
    }

    /* Mask extensions that are not supported by this hart */
    val &= env->misa_ext_mask;

    /*
     * Suppress 'C' if next instruction is not aligned
     * TODO: this should check next_pc
     * NOTE(review): GETPC() here is the host return address, not the guest
     * next_pc the comment asks for — confirm intent before relying on it.
     */
    if ((val & RVC) && (GETPC() & ~3) != 0) {
        val &= ~RVC;
    }

    /* Disable RVG if any of its dependencies are disabled */
    if (!(val & RVI && val & RVM && val & RVA &&
          val & RVF && val & RVD)) {
        val &= ~RVG;
    }

    /* If nothing changed, do nothing. */
    if (val == env->misa_ext) {
        return RISCV_EXCP_NONE;
    }

    env->misa_ext = val;
    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        /* Rollback on validation error */
        qemu_log_mask(LOG_GUEST_ERROR, "Unable to write MISA ext value "
                      "0x%x, keeping existing MISA ext 0x%x\n",
                      env->misa_ext, orig_misa_ext);

        env->misa_ext = orig_misa_ext;

        return RISCV_EXCP_NONE;
    }

    /* Without F, the FS field must read as Off. */
    if (!(env->misa_ext & RVF)) {
        env->mstatus &= ~MSTATUS_FS;
    }

    /* flush translation cache */
    tb_flush(env_cpu(env));
    env->xl = riscv_cpu_mxl(env);
    return RISCV_EXCP_NONE;
}
1771
read_medeleg(CPURISCVState * env,int csrno,target_ulong * val)1772 static RISCVException read_medeleg(CPURISCVState *env, int csrno,
1773 target_ulong *val)
1774 {
1775 *val = env->medeleg;
1776 return RISCV_EXCP_NONE;
1777 }
1778
write_medeleg(CPURISCVState * env,int csrno,target_ulong val)1779 static RISCVException write_medeleg(CPURISCVState *env, int csrno,
1780 target_ulong val)
1781 {
1782 env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
1783 return RISCV_EXCP_NONE;
1784 }
1785
rmw_mideleg64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)1786 static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno,
1787 uint64_t *ret_val,
1788 uint64_t new_val, uint64_t wr_mask)
1789 {
1790 uint64_t mask = wr_mask & delegable_ints;
1791
1792 if (ret_val) {
1793 *ret_val = env->mideleg;
1794 }
1795
1796 env->mideleg = (env->mideleg & ~mask) | (new_val & mask);
1797
1798 if (riscv_has_ext(env, RVH)) {
1799 env->mideleg |= HS_MODE_INTERRUPTS;
1800 }
1801
1802 return RISCV_EXCP_NONE;
1803 }
1804
rmw_mideleg(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)1805 static RISCVException rmw_mideleg(CPURISCVState *env, int csrno,
1806 target_ulong *ret_val,
1807 target_ulong new_val, target_ulong wr_mask)
1808 {
1809 uint64_t rval;
1810 RISCVException ret;
1811
1812 ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask);
1813 if (ret_val) {
1814 *ret_val = rval;
1815 }
1816
1817 return ret;
1818 }
1819
rmw_midelegh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)1820 static RISCVException rmw_midelegh(CPURISCVState *env, int csrno,
1821 target_ulong *ret_val,
1822 target_ulong new_val,
1823 target_ulong wr_mask)
1824 {
1825 uint64_t rval;
1826 RISCVException ret;
1827
1828 ret = rmw_mideleg64(env, csrno, &rval,
1829 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1830 if (ret_val) {
1831 *ret_val = rval >> 32;
1832 }
1833
1834 return ret;
1835 }
1836
rmw_mie64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)1837 static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
1838 uint64_t *ret_val,
1839 uint64_t new_val, uint64_t wr_mask)
1840 {
1841 uint64_t mask = wr_mask & all_ints;
1842
1843 if (ret_val) {
1844 *ret_val = env->mie;
1845 }
1846
1847 env->mie = (env->mie & ~mask) | (new_val & mask);
1848
1849 if (!riscv_has_ext(env, RVH)) {
1850 env->mie &= ~((uint64_t)HS_MODE_INTERRUPTS);
1851 }
1852
1853 return RISCV_EXCP_NONE;
1854 }
1855
rmw_mie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)1856 static RISCVException rmw_mie(CPURISCVState *env, int csrno,
1857 target_ulong *ret_val,
1858 target_ulong new_val, target_ulong wr_mask)
1859 {
1860 uint64_t rval;
1861 RISCVException ret;
1862
1863 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask);
1864 if (ret_val) {
1865 *ret_val = rval;
1866 }
1867
1868 return ret;
1869 }
1870
rmw_mieh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)1871 static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
1872 target_ulong *ret_val,
1873 target_ulong new_val, target_ulong wr_mask)
1874 {
1875 uint64_t rval;
1876 RISCVException ret;
1877
1878 ret = rmw_mie64(env, csrno, &rval,
1879 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1880 if (ret_val) {
1881 *ret_val = rval >> 32;
1882 }
1883
1884 return ret;
1885 }
1886
rmw_mvien64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)1887 static RISCVException rmw_mvien64(CPURISCVState *env, int csrno,
1888 uint64_t *ret_val,
1889 uint64_t new_val, uint64_t wr_mask)
1890 {
1891 uint64_t mask = wr_mask & mvien_writable_mask;
1892
1893 if (ret_val) {
1894 *ret_val = env->mvien;
1895 }
1896
1897 env->mvien = (env->mvien & ~mask) | (new_val & mask);
1898
1899 return RISCV_EXCP_NONE;
1900 }
1901
rmw_mvien(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)1902 static RISCVException rmw_mvien(CPURISCVState *env, int csrno,
1903 target_ulong *ret_val,
1904 target_ulong new_val, target_ulong wr_mask)
1905 {
1906 uint64_t rval;
1907 RISCVException ret;
1908
1909 ret = rmw_mvien64(env, csrno, &rval, new_val, wr_mask);
1910 if (ret_val) {
1911 *ret_val = rval;
1912 }
1913
1914 return ret;
1915 }
1916
rmw_mvienh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)1917 static RISCVException rmw_mvienh(CPURISCVState *env, int csrno,
1918 target_ulong *ret_val,
1919 target_ulong new_val, target_ulong wr_mask)
1920 {
1921 uint64_t rval;
1922 RISCVException ret;
1923
1924 ret = rmw_mvien64(env, csrno, &rval,
1925 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1926 if (ret_val) {
1927 *ret_val = rval >> 32;
1928 }
1929
1930 return ret;
1931 }
1932
read_mtopi(CPURISCVState * env,int csrno,target_ulong * val)1933 static RISCVException read_mtopi(CPURISCVState *env, int csrno,
1934 target_ulong *val)
1935 {
1936 int irq;
1937 uint8_t iprio;
1938
1939 irq = riscv_cpu_mirq_pending(env);
1940 if (irq <= 0 || irq > 63) {
1941 *val = 0;
1942 } else {
1943 iprio = env->miprio[irq];
1944 if (!iprio) {
1945 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) {
1946 iprio = IPRIO_MMAXIPRIO;
1947 }
1948 }
1949 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
1950 *val |= iprio;
1951 }
1952
1953 return RISCV_EXCP_NONE;
1954 }
1955
/* Map an S-level AIA CSR number to its VS-level twin when V=1. */
static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
{
    if (env->virt_enabled) {
        switch (csrno) {
        case CSR_SISELECT:
            return CSR_VSISELECT;
        case CSR_SIREG:
            return CSR_VSIREG;
        case CSR_STOPEI:
            return CSR_VSTOPEI;
        default:
            break;
        }
    }

    return csrno;
}
1973
rmw_xiselect(CPURISCVState * env,int csrno,target_ulong * val,target_ulong new_val,target_ulong wr_mask)1974 static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
1975 target_ulong *val, target_ulong new_val,
1976 target_ulong wr_mask)
1977 {
1978 target_ulong *iselect;
1979
1980 /* Translate CSR number for VS-mode */
1981 csrno = aia_xlate_vs_csrno(env, csrno);
1982
1983 /* Find the iselect CSR based on CSR number */
1984 switch (csrno) {
1985 case CSR_MISELECT:
1986 iselect = &env->miselect;
1987 break;
1988 case CSR_SISELECT:
1989 iselect = &env->siselect;
1990 break;
1991 case CSR_VSISELECT:
1992 iselect = &env->vsiselect;
1993 break;
1994 default:
1995 return RISCV_EXCP_ILLEGAL_INST;
1996 };
1997
1998 if (val) {
1999 *val = *iselect;
2000 }
2001
2002 wr_mask &= ISELECT_MASK;
2003 if (wr_mask) {
2004 *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
2005 }
2006
2007 return RISCV_EXCP_NONE;
2008 }
2009
/*
 * Read-modify-write one iprio array window selected by @iselect.
 *
 * Each iprio register packs (xlen / 8) byte-sized priorities; on RV64 the
 * odd-numbered selects are reserved. The external-interrupt slot
 * (@ext_irq_no) is read-only zero. Returns 0 or -EINVAL.
 */
static int rmw_iprio(target_ulong xlen,
                     target_ulong iselect, uint8_t *iprio,
                     target_ulong *val, target_ulong new_val,
                     target_ulong wr_mask, int ext_irq_no)
{
    int i, firq, nirqs;
    target_ulong old_val;

    if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) {
        return -EINVAL;
    }
    /* On RV64 each register covers two RV32 selects; odd ones are invalid. */
    if (xlen != 32 && iselect & 0x1) {
        return -EINVAL;
    }

    /* Number of priority bytes per register and the first IRQ it covers. */
    nirqs = 4 * (xlen / 32);
    firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs);

    old_val = 0;
    for (i = 0; i < nirqs; i++) {
        old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i);
    }

    if (val) {
        *val = old_val;
    }

    if (wr_mask) {
        new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
        for (i = 0; i < nirqs; i++) {
            /*
             * M-level and S-level external IRQ priority always read-only
             * zero. This means default priority order is always preferred
             * for M-level and S-level external IRQs.
             */
            if ((firq + i) == ext_irq_no) {
                continue;
            }
            iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff;
        }
    }

    return 0;
}
2054
/*
 * Read-modify-write the AIA indirect-access CSR (mireg/sireg/vsireg).
 *
 * Dispatches on the current *iselect value: iprio array windows are
 * handled locally, IMSIC registers go through the machine's rmw callback.
 * On failure raises a virtual-instruction fault when executing as a guest
 * on a non-reserved select, otherwise an illegal-instruction fault.
 */
static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
                                target_ulong *val, target_ulong new_val,
                                target_ulong wr_mask)
{
    bool virt, isel_reserved;
    uint8_t *iprio;
    int ret = -EINVAL;
    target_ulong priv, isel, vgein;

    /* Translate CSR number for VS-mode */
    csrno = aia_xlate_vs_csrno(env, csrno);

    /* Decode register details from CSR number */
    virt = false;
    isel_reserved = false;
    switch (csrno) {
    case CSR_MIREG:
        iprio = env->miprio;
        isel = env->miselect;
        priv = PRV_M;
        break;
    case CSR_SIREG:
        /*
         * When mvien delegates SEIP, S-mode must not reach the IMSIC
         * interrupt-file registers through sireg.
         */
        if (env->priv == PRV_S && env->mvien & MIP_SEIP &&
            env->siselect >= ISELECT_IMSIC_EIDELIVERY &&
            env->siselect <= ISELECT_IMSIC_EIE63) {
            goto done;
        }
        iprio = env->siprio;
        isel = env->siselect;
        priv = PRV_S;
        break;
    case CSR_VSIREG:
        iprio = env->hviprio;
        isel = env->vsiselect;
        priv = PRV_S;
        virt = true;
        break;
    default:
        goto done;
    };

    /* Find the selected guest interrupt file */
    vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;

    if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) {
        /* Local interrupt priority registers not available for VS-mode */
        if (!virt) {
            ret = rmw_iprio(riscv_cpu_mxl_bits(env),
                            isel, iprio, val, new_val, wr_mask,
                            (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT);
        }
    } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) {
        /* IMSIC registers only available when machine implements it. */
        if (env->aia_ireg_rmw_fn[priv]) {
            /* Selected guest interrupt file should not be zero */
            if (virt && (!vgein || env->geilen < vgein)) {
                goto done;
            }
            /* Call machine specific IMSIC register emulation */
            ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
                                             AIA_MAKE_IREG(isel, priv, virt, vgein,
                                                           riscv_cpu_mxl_bits(env)),
                                             val, new_val, wr_mask);
        }
    } else {
        isel_reserved = true;
    }

done:
    if (ret) {
        return (env->virt_enabled && virt && !isel_reserved) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }
    return RISCV_EXCP_NONE;
}
2130
/*
 * Read-modify-write the IMSIC top external-interrupt CSR
 * (mtopei/stopei/vstopei).
 *
 * All accesses are forwarded to the machine's IMSIC emulation. A fault
 * is raised when no IMSIC is present, when S-mode access is blocked by
 * mvien.SEIP, or when the selected guest interrupt file is invalid.
 */
static RISCVException rmw_xtopei(CPURISCVState *env, int csrno,
                                 target_ulong *val, target_ulong new_val,
                                 target_ulong wr_mask)
{
    bool virt;
    int ret = -EINVAL;
    target_ulong priv, vgein;

    /* Translate CSR number for VS-mode */
    csrno = aia_xlate_vs_csrno(env, csrno);

    /* Decode register details from CSR number */
    virt = false;
    switch (csrno) {
    case CSR_MTOPEI:
        priv = PRV_M;
        break;
    case CSR_STOPEI:
        /* With SEIP delegated via mvien, stopei is blocked for S-mode */
        if (env->mvien & MIP_SEIP && env->priv == PRV_S) {
            goto done;
        }
        priv = PRV_S;
        break;
    case CSR_VSTOPEI:
        priv = PRV_S;
        virt = true;
        break;
    default:
        goto done;
    };

    /* IMSIC CSRs only available when machine implements IMSIC. */
    if (!env->aia_ireg_rmw_fn[priv]) {
        goto done;
    }

    /* Find the selected guest interrupt file */
    vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;

    /* Selected guest interrupt file should be valid */
    if (virt && (!vgein || env->geilen < vgein)) {
        goto done;
    }

    /* Call machine specific IMSIC register emulation for TOPEI */
    ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
                                     AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein,
                                                   riscv_cpu_mxl_bits(env)),
                                     val, new_val, wr_mask);

done:
    if (ret) {
        return (env->virt_enabled && virt) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }
    return RISCV_EXCP_NONE;
}
2188
read_mtvec(CPURISCVState * env,int csrno,target_ulong * val)2189 static RISCVException read_mtvec(CPURISCVState *env, int csrno,
2190 target_ulong *val)
2191 {
2192 *val = env->mtvec;
2193 return RISCV_EXCP_NONE;
2194 }
2195
write_mtvec(CPURISCVState * env,int csrno,target_ulong val)2196 static RISCVException write_mtvec(CPURISCVState *env, int csrno,
2197 target_ulong val)
2198 {
2199 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
2200 if ((val & 3) < 2) {
2201 env->mtvec = val;
2202 } else {
2203 qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n");
2204 }
2205 return RISCV_EXCP_NONE;
2206 }
2207
read_mcountinhibit(CPURISCVState * env,int csrno,target_ulong * val)2208 static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
2209 target_ulong *val)
2210 {
2211 *val = env->mcountinhibit;
2212 return RISCV_EXCP_NONE;
2213 }
2214
/*
 * Write mcountinhibit and resynchronize the affected PMU counters.
 *
 * For every cycle/instret-monitoring counter whose inhibit bit changed:
 *  - on enable (bit cleared), snapshot the current host counter value as
 *    the new baseline and re-arm any programmed overflow timer;
 *  - on disable (bit set), fold the elapsed delta since the last baseline
 *    into the saved counter value so later reads see a frozen count.
 */
static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
                                          target_ulong val)
{
    int cidx;
    PMUCTRState *counter;
    RISCVCPU *cpu = env_archcpu(env);
    uint32_t present_ctrs = cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR;
    target_ulong updated_ctrs = (env->mcountinhibit ^ val) & present_ctrs;
    uint64_t mhpmctr_val, prev_count, curr_count;

    /* WARL register - disable unavailable counters; TM bit is always 0 */
    env->mcountinhibit = val & present_ctrs;

    /* Check if any other counter is also monitoring cycles/instructions */
    for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
        /* Skip counters whose bit did not change or that monitor neither */
        if (!(updated_ctrs & BIT(cidx)) ||
            (!riscv_pmu_ctr_monitor_cycles(env, cidx) &&
             !riscv_pmu_ctr_monitor_instructions(env, cidx))) {
            continue;
        }

        counter = &env->pmu_ctrs[cidx];

        if (!get_field(env->mcountinhibit, BIT(cidx))) {
            /* Counter re-enabled: record the baseline for delta reads */
            counter->mhpmcounter_prev =
                riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                counter->mhpmcounterh_prev =
                    riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);
            }

            /* Programmable counters (index > 2) may have an armed timer */
            if (cidx > 2) {
                mhpmctr_val = counter->mhpmcounter_val;
                if (riscv_cpu_mxl(env) == MXL_RV32) {
                    mhpmctr_val = mhpmctr_val |
                            ((uint64_t)counter->mhpmcounterh_val << 32);
                }
                riscv_pmu_setup_timer(env, mhpmctr_val, cidx);
            }
        } else {
            /* Counter inhibited: fold elapsed delta into the saved value */
            curr_count = riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);

            mhpmctr_val = counter->mhpmcounter_val;
            prev_count = counter->mhpmcounter_prev;
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                uint64_t tmp =
                    riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);

                curr_count = curr_count | (tmp << 32);
                mhpmctr_val = mhpmctr_val |
                        ((uint64_t)counter->mhpmcounterh_val << 32);
                prev_count = prev_count |
                        ((uint64_t)counter->mhpmcounterh_prev << 32);
            }

            /* Adjust the counter for later reads. */
            mhpmctr_val = curr_count - prev_count + mhpmctr_val;
            counter->mhpmcounter_val = mhpmctr_val;
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                counter->mhpmcounterh_val = mhpmctr_val >> 32;
            }
        }
    }

    return RISCV_EXCP_NONE;
}
2281
read_mcounteren(CPURISCVState * env,int csrno,target_ulong * val)2282 static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
2283 target_ulong *val)
2284 {
2285 *val = env->mcounteren;
2286 return RISCV_EXCP_NONE;
2287 }
2288
write_mcounteren(CPURISCVState * env,int csrno,target_ulong val)2289 static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
2290 target_ulong val)
2291 {
2292 RISCVCPU *cpu = env_archcpu(env);
2293
2294 /* WARL register - disable unavailable counters */
2295 env->mcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
2296 COUNTEREN_IR);
2297 return RISCV_EXCP_NONE;
2298 }
2299
2300 /* Machine Trap Handling */
read_mscratch_i128(CPURISCVState * env,int csrno,Int128 * val)2301 static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
2302 Int128 *val)
2303 {
2304 *val = int128_make128(env->mscratch, env->mscratchh);
2305 return RISCV_EXCP_NONE;
2306 }
2307
write_mscratch_i128(CPURISCVState * env,int csrno,Int128 val)2308 static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
2309 Int128 val)
2310 {
2311 env->mscratch = int128_getlo(val);
2312 env->mscratchh = int128_gethi(val);
2313 return RISCV_EXCP_NONE;
2314 }
2315
read_mscratch(CPURISCVState * env,int csrno,target_ulong * val)2316 static RISCVException read_mscratch(CPURISCVState *env, int csrno,
2317 target_ulong *val)
2318 {
2319 *val = env->mscratch;
2320 return RISCV_EXCP_NONE;
2321 }
2322
write_mscratch(CPURISCVState * env,int csrno,target_ulong val)2323 static RISCVException write_mscratch(CPURISCVState *env, int csrno,
2324 target_ulong val)
2325 {
2326 env->mscratch = val;
2327 return RISCV_EXCP_NONE;
2328 }
2329
read_mepc(CPURISCVState * env,int csrno,target_ulong * val)2330 static RISCVException read_mepc(CPURISCVState *env, int csrno,
2331 target_ulong *val)
2332 {
2333 *val = env->mepc;
2334 return RISCV_EXCP_NONE;
2335 }
2336
write_mepc(CPURISCVState * env,int csrno,target_ulong val)2337 static RISCVException write_mepc(CPURISCVState *env, int csrno,
2338 target_ulong val)
2339 {
2340 env->mepc = val;
2341 return RISCV_EXCP_NONE;
2342 }
2343
read_mcause(CPURISCVState * env,int csrno,target_ulong * val)2344 static RISCVException read_mcause(CPURISCVState *env, int csrno,
2345 target_ulong *val)
2346 {
2347 *val = env->mcause;
2348 return RISCV_EXCP_NONE;
2349 }
2350
write_mcause(CPURISCVState * env,int csrno,target_ulong val)2351 static RISCVException write_mcause(CPURISCVState *env, int csrno,
2352 target_ulong val)
2353 {
2354 env->mcause = val;
2355 return RISCV_EXCP_NONE;
2356 }
2357
read_mtval(CPURISCVState * env,int csrno,target_ulong * val)2358 static RISCVException read_mtval(CPURISCVState *env, int csrno,
2359 target_ulong *val)
2360 {
2361 *val = env->mtval;
2362 return RISCV_EXCP_NONE;
2363 }
2364
write_mtval(CPURISCVState * env,int csrno,target_ulong val)2365 static RISCVException write_mtval(CPURISCVState *env, int csrno,
2366 target_ulong val)
2367 {
2368 env->mtval = val;
2369 return RISCV_EXCP_NONE;
2370 }
2371
2372 /* Execution environment configuration setup */
read_menvcfg(CPURISCVState * env,int csrno,target_ulong * val)2373 static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
2374 target_ulong *val)
2375 {
2376 *val = env->menvcfg;
2377 return RISCV_EXCP_NONE;
2378 }
2379
write_menvcfg(CPURISCVState * env,int csrno,target_ulong val)2380 static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
2381 target_ulong val)
2382 {
2383 const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
2384 uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE;
2385
2386 if (riscv_cpu_mxl(env) == MXL_RV64) {
2387 mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
2388 (cfg->ext_sstc ? MENVCFG_STCE : 0) |
2389 (cfg->ext_svadu ? MENVCFG_ADUE : 0);
2390
2391 if (env_archcpu(env)->cfg.ext_zicfilp) {
2392 mask |= MENVCFG_LPE;
2393 }
2394
2395 if (env_archcpu(env)->cfg.ext_zicfiss) {
2396 mask |= MENVCFG_SSE;
2397 }
2398 }
2399 env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
2400
2401 return RISCV_EXCP_NONE;
2402 }
2403
read_menvcfgh(CPURISCVState * env,int csrno,target_ulong * val)2404 static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
2405 target_ulong *val)
2406 {
2407 *val = env->menvcfg >> 32;
2408 return RISCV_EXCP_NONE;
2409 }
2410
write_menvcfgh(CPURISCVState * env,int csrno,target_ulong val)2411 static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
2412 target_ulong val)
2413 {
2414 const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
2415 uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
2416 (cfg->ext_sstc ? MENVCFG_STCE : 0) |
2417 (cfg->ext_svadu ? MENVCFG_ADUE : 0);
2418 uint64_t valh = (uint64_t)val << 32;
2419
2420 env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
2421
2422 return RISCV_EXCP_NONE;
2423 }
2424
read_senvcfg(CPURISCVState * env,int csrno,target_ulong * val)2425 static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
2426 target_ulong *val)
2427 {
2428 RISCVException ret;
2429
2430 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2431 if (ret != RISCV_EXCP_NONE) {
2432 return ret;
2433 }
2434
2435 *val = env->senvcfg;
2436 return RISCV_EXCP_NONE;
2437 }
2438
write_senvcfg(CPURISCVState * env,int csrno,target_ulong val)2439 static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
2440 target_ulong val)
2441 {
2442 uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
2443 RISCVException ret;
2444
2445 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2446 if (ret != RISCV_EXCP_NONE) {
2447 return ret;
2448 }
2449
2450 if (env_archcpu(env)->cfg.ext_zicfilp) {
2451 mask |= SENVCFG_LPE;
2452 }
2453
2454 /* Higher mode SSE must be ON for next-less mode SSE to be ON */
2455 if (env_archcpu(env)->cfg.ext_zicfiss &&
2456 get_field(env->menvcfg, MENVCFG_SSE) &&
2457 (env->virt_enabled ? get_field(env->henvcfg, HENVCFG_SSE) : true)) {
2458 mask |= SENVCFG_SSE;
2459 }
2460
2461 env->senvcfg = (env->senvcfg & ~mask) | (val & mask);
2462 return RISCV_EXCP_NONE;
2463 }
2464
read_henvcfg(CPURISCVState * env,int csrno,target_ulong * val)2465 static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
2466 target_ulong *val)
2467 {
2468 RISCVException ret;
2469
2470 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2471 if (ret != RISCV_EXCP_NONE) {
2472 return ret;
2473 }
2474
2475 /*
2476 * henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0
2477 * henvcfg.stce is read_only 0 when menvcfg.stce = 0
2478 * henvcfg.adue is read_only 0 when menvcfg.adue = 0
2479 */
2480 *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
2481 env->menvcfg);
2482 return RISCV_EXCP_NONE;
2483 }
2484
write_henvcfg(CPURISCVState * env,int csrno,target_ulong val)2485 static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
2486 target_ulong val)
2487 {
2488 uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;
2489 RISCVException ret;
2490
2491 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2492 if (ret != RISCV_EXCP_NONE) {
2493 return ret;
2494 }
2495
2496 if (riscv_cpu_mxl(env) == MXL_RV64) {
2497 mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE);
2498
2499 if (env_archcpu(env)->cfg.ext_zicfilp) {
2500 mask |= HENVCFG_LPE;
2501 }
2502
2503 /* H can light up SSE for VS only if HS had it from menvcfg */
2504 if (env_archcpu(env)->cfg.ext_zicfiss &&
2505 get_field(env->menvcfg, MENVCFG_SSE)) {
2506 mask |= HENVCFG_SSE;
2507 }
2508 }
2509
2510 env->henvcfg = (env->henvcfg & ~mask) | (val & mask);
2511
2512 return RISCV_EXCP_NONE;
2513 }
2514
read_henvcfgh(CPURISCVState * env,int csrno,target_ulong * val)2515 static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
2516 target_ulong *val)
2517 {
2518 RISCVException ret;
2519
2520 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2521 if (ret != RISCV_EXCP_NONE) {
2522 return ret;
2523 }
2524
2525 *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
2526 env->menvcfg)) >> 32;
2527 return RISCV_EXCP_NONE;
2528 }
2529
write_henvcfgh(CPURISCVState * env,int csrno,target_ulong val)2530 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
2531 target_ulong val)
2532 {
2533 uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE |
2534 HENVCFG_ADUE);
2535 uint64_t valh = (uint64_t)val << 32;
2536 RISCVException ret;
2537
2538 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2539 if (ret != RISCV_EXCP_NONE) {
2540 return ret;
2541 }
2542
2543 env->henvcfg = (env->henvcfg & ~mask) | (valh & mask);
2544 return RISCV_EXCP_NONE;
2545 }
2546
read_mstateen(CPURISCVState * env,int csrno,target_ulong * val)2547 static RISCVException read_mstateen(CPURISCVState *env, int csrno,
2548 target_ulong *val)
2549 {
2550 *val = env->mstateen[csrno - CSR_MSTATEEN0];
2551
2552 return RISCV_EXCP_NONE;
2553 }
2554
write_mstateen(CPURISCVState * env,int csrno,uint64_t wr_mask,target_ulong new_val)2555 static RISCVException write_mstateen(CPURISCVState *env, int csrno,
2556 uint64_t wr_mask, target_ulong new_val)
2557 {
2558 uint64_t *reg;
2559
2560 reg = &env->mstateen[csrno - CSR_MSTATEEN0];
2561 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2562
2563 return RISCV_EXCP_NONE;
2564 }
2565
write_mstateen0(CPURISCVState * env,int csrno,target_ulong new_val)2566 static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
2567 target_ulong new_val)
2568 {
2569 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2570 if (!riscv_has_ext(env, RVF)) {
2571 wr_mask |= SMSTATEEN0_FCSR;
2572 }
2573
2574 if (env->priv_ver >= PRIV_VERSION_1_13_0) {
2575 wr_mask |= SMSTATEEN0_P1P13;
2576 }
2577
2578 return write_mstateen(env, csrno, wr_mask, new_val);
2579 }
2580
write_mstateen_1_3(CPURISCVState * env,int csrno,target_ulong new_val)2581 static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno,
2582 target_ulong new_val)
2583 {
2584 return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2585 }
2586
read_mstateenh(CPURISCVState * env,int csrno,target_ulong * val)2587 static RISCVException read_mstateenh(CPURISCVState *env, int csrno,
2588 target_ulong *val)
2589 {
2590 *val = env->mstateen[csrno - CSR_MSTATEEN0H] >> 32;
2591
2592 return RISCV_EXCP_NONE;
2593 }
2594
write_mstateenh(CPURISCVState * env,int csrno,uint64_t wr_mask,target_ulong new_val)2595 static RISCVException write_mstateenh(CPURISCVState *env, int csrno,
2596 uint64_t wr_mask, target_ulong new_val)
2597 {
2598 uint64_t *reg, val;
2599
2600 reg = &env->mstateen[csrno - CSR_MSTATEEN0H];
2601 val = (uint64_t)new_val << 32;
2602 val |= *reg & 0xFFFFFFFF;
2603 *reg = (*reg & ~wr_mask) | (val & wr_mask);
2604
2605 return RISCV_EXCP_NONE;
2606 }
2607
write_mstateen0h(CPURISCVState * env,int csrno,target_ulong new_val)2608 static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
2609 target_ulong new_val)
2610 {
2611 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2612
2613 if (env->priv_ver >= PRIV_VERSION_1_13_0) {
2614 wr_mask |= SMSTATEEN0_P1P13;
2615 }
2616
2617 return write_mstateenh(env, csrno, wr_mask, new_val);
2618 }
2619
write_mstateenh_1_3(CPURISCVState * env,int csrno,target_ulong new_val)2620 static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno,
2621 target_ulong new_val)
2622 {
2623 return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
2624 }
2625
read_hstateen(CPURISCVState * env,int csrno,target_ulong * val)2626 static RISCVException read_hstateen(CPURISCVState *env, int csrno,
2627 target_ulong *val)
2628 {
2629 int index = csrno - CSR_HSTATEEN0;
2630
2631 *val = env->hstateen[index] & env->mstateen[index];
2632
2633 return RISCV_EXCP_NONE;
2634 }
2635
write_hstateen(CPURISCVState * env,int csrno,uint64_t mask,target_ulong new_val)2636 static RISCVException write_hstateen(CPURISCVState *env, int csrno,
2637 uint64_t mask, target_ulong new_val)
2638 {
2639 int index = csrno - CSR_HSTATEEN0;
2640 uint64_t *reg, wr_mask;
2641
2642 reg = &env->hstateen[index];
2643 wr_mask = env->mstateen[index] & mask;
2644 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2645
2646 return RISCV_EXCP_NONE;
2647 }
2648
write_hstateen0(CPURISCVState * env,int csrno,target_ulong new_val)2649 static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
2650 target_ulong new_val)
2651 {
2652 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2653
2654 if (!riscv_has_ext(env, RVF)) {
2655 wr_mask |= SMSTATEEN0_FCSR;
2656 }
2657
2658 return write_hstateen(env, csrno, wr_mask, new_val);
2659 }
2660
write_hstateen_1_3(CPURISCVState * env,int csrno,target_ulong new_val)2661 static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno,
2662 target_ulong new_val)
2663 {
2664 return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2665 }
2666
read_hstateenh(CPURISCVState * env,int csrno,target_ulong * val)2667 static RISCVException read_hstateenh(CPURISCVState *env, int csrno,
2668 target_ulong *val)
2669 {
2670 int index = csrno - CSR_HSTATEEN0H;
2671
2672 *val = (env->hstateen[index] >> 32) & (env->mstateen[index] >> 32);
2673
2674 return RISCV_EXCP_NONE;
2675 }
2676
write_hstateenh(CPURISCVState * env,int csrno,uint64_t mask,target_ulong new_val)2677 static RISCVException write_hstateenh(CPURISCVState *env, int csrno,
2678 uint64_t mask, target_ulong new_val)
2679 {
2680 int index = csrno - CSR_HSTATEEN0H;
2681 uint64_t *reg, wr_mask, val;
2682
2683 reg = &env->hstateen[index];
2684 val = (uint64_t)new_val << 32;
2685 val |= *reg & 0xFFFFFFFF;
2686 wr_mask = env->mstateen[index] & mask;
2687 *reg = (*reg & ~wr_mask) | (val & wr_mask);
2688
2689 return RISCV_EXCP_NONE;
2690 }
2691
write_hstateen0h(CPURISCVState * env,int csrno,target_ulong new_val)2692 static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
2693 target_ulong new_val)
2694 {
2695 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2696
2697 return write_hstateenh(env, csrno, wr_mask, new_val);
2698 }
2699
write_hstateenh_1_3(CPURISCVState * env,int csrno,target_ulong new_val)2700 static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno,
2701 target_ulong new_val)
2702 {
2703 return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
2704 }
2705
read_sstateen(CPURISCVState * env,int csrno,target_ulong * val)2706 static RISCVException read_sstateen(CPURISCVState *env, int csrno,
2707 target_ulong *val)
2708 {
2709 bool virt = env->virt_enabled;
2710 int index = csrno - CSR_SSTATEEN0;
2711
2712 *val = env->sstateen[index] & env->mstateen[index];
2713 if (virt) {
2714 *val &= env->hstateen[index];
2715 }
2716
2717 return RISCV_EXCP_NONE;
2718 }
2719
write_sstateen(CPURISCVState * env,int csrno,uint64_t mask,target_ulong new_val)2720 static RISCVException write_sstateen(CPURISCVState *env, int csrno,
2721 uint64_t mask, target_ulong new_val)
2722 {
2723 bool virt = env->virt_enabled;
2724 int index = csrno - CSR_SSTATEEN0;
2725 uint64_t wr_mask;
2726 uint64_t *reg;
2727
2728 wr_mask = env->mstateen[index] & mask;
2729 if (virt) {
2730 wr_mask &= env->hstateen[index];
2731 }
2732
2733 reg = &env->sstateen[index];
2734 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2735
2736 return RISCV_EXCP_NONE;
2737 }
2738
write_sstateen0(CPURISCVState * env,int csrno,target_ulong new_val)2739 static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
2740 target_ulong new_val)
2741 {
2742 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2743
2744 if (!riscv_has_ext(env, RVF)) {
2745 wr_mask |= SMSTATEEN0_FCSR;
2746 }
2747
2748 return write_sstateen(env, csrno, wr_mask, new_val);
2749 }
2750
write_sstateen_1_3(CPURISCVState * env,int csrno,target_ulong new_val)2751 static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno,
2752 target_ulong new_val)
2753 {
2754 return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2755 }
2756
/*
 * 64-bit read-modify-write of mip (also backs hvip accesses).
 *
 * Only delegable interrupt bits are writable. SEIP writes are tracked
 * separately so the software-injected bit can be combined with the
 * external (wire) SEIP source. With Sstc active in M-mode, STIP and
 * VSTIP are owned by the stimecmp machinery and become read-only here.
 */
static RISCVException rmw_mip64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    uint64_t old_mip, mask = wr_mask & delegable_ints;
    uint32_t gin;

    if (mask & MIP_SEIP) {
        /* Remember what software wrote; keep the wire SEIP source ORed in */
        env->software_seip = new_val & MIP_SEIP;
        new_val |= env->external_seip * MIP_SEIP;
    }

    if (riscv_cpu_cfg(env)->ext_sstc && (env->priv == PRV_M) &&
        get_field(env->menvcfg, MENVCFG_STCE)) {
        /* sstc extension forbids STIP & VSTIP to be writeable in mip */
        mask = mask & ~(MIP_STIP | MIP_VSTIP);
    }

    if (mask) {
        old_mip = riscv_cpu_update_mip(env, mask, (new_val & mask));
    } else {
        old_mip = env->mip;
    }

    if (csrno != CSR_HVIP) {
        /* Reads reflect VSEIP/VSTIP from their actual hardware sources */
        gin = get_field(env->hstatus, HSTATUS_VGEIN);
        old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0;
        old_mip |= env->vstime_irq ? MIP_VSTIP : 0;
    }

    if (ret_val) {
        *ret_val = old_mip;
    }

    return RISCV_EXCP_NONE;
}
2793
rmw_mip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2794 static RISCVException rmw_mip(CPURISCVState *env, int csrno,
2795 target_ulong *ret_val,
2796 target_ulong new_val, target_ulong wr_mask)
2797 {
2798 uint64_t rval;
2799 RISCVException ret;
2800
2801 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask);
2802 if (ret_val) {
2803 *ret_val = rval;
2804 }
2805
2806 return ret;
2807 }
2808
rmw_miph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2809 static RISCVException rmw_miph(CPURISCVState *env, int csrno,
2810 target_ulong *ret_val,
2811 target_ulong new_val, target_ulong wr_mask)
2812 {
2813 uint64_t rval;
2814 RISCVException ret;
2815
2816 ret = rmw_mip64(env, csrno, &rval,
2817 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2818 if (ret_val) {
2819 *ret_val = rval >> 32;
2820 }
2821
2822 return ret;
2823 }
2824
2825 /*
2826 * The function is written for two use-cases:
2827 * 1- To access mvip csr as is for m-mode access.
2828 * 2- To access sip as a combination of mip and mvip for s-mode.
2829 *
2830 * Both report bits 1, 5, 9 and 13:63 but with the exception of
2831 * STIP being read-only zero in case of mvip when sstc extension
2832 * is present.
2833 * Also, sip needs to be read-only zero when both mideleg[i] and
2834 * mvien[i] are zero but mvip needs to be an alias of mip.
2835 */
rmw_mvip64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)2836 static RISCVException rmw_mvip64(CPURISCVState *env, int csrno,
2837 uint64_t *ret_val,
2838 uint64_t new_val, uint64_t wr_mask)
2839 {
2840 RISCVCPU *cpu = env_archcpu(env);
2841 target_ulong ret_mip = 0;
2842 RISCVException ret;
2843 uint64_t old_mvip;
2844
2845 /*
2846 * mideleg[i] mvien[i]
2847 * 0 0 No delegation. mvip[i] is alias of mip[i].
2848 * 0 1 mvip[i] becomes source of interrupt, mip bypassed.
2849 * 1 X mip[i] is source of interrupt and mvip[i] aliases
2850 * mip[i].
2851 *
2852 * So alias condition would be for bits:
2853 * ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (mideleg | ~mvien)) |
2854 * (!sstc & MIP_STIP)
2855 *
2856 * Non-alias condition will be for bits:
2857 * (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (~mideleg & mvien)
2858 *
2859 * alias_mask denotes the bits that come from mip nalias_mask denotes bits
2860 * that come from hvip.
2861 */
2862 uint64_t alias_mask = ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
2863 (env->mideleg | ~env->mvien)) | MIP_STIP;
2864 uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
2865 (~env->mideleg & env->mvien);
2866 uint64_t wr_mask_mvip;
2867 uint64_t wr_mask_mip;
2868
2869 /*
2870 * mideleg[i] mvien[i]
2871 * 0 0 sip[i] read-only zero.
2872 * 0 1 sip[i] alias of mvip[i].
2873 * 1 X sip[i] alias of mip[i].
2874 *
2875 * Both alias and non-alias mask remain same for sip except for bits
2876 * which are zero in both mideleg and mvien.
2877 */
2878 if (csrno == CSR_SIP) {
2879 /* Remove bits that are zero in both mideleg and mvien. */
2880 alias_mask &= (env->mideleg | env->mvien);
2881 nalias_mask &= (env->mideleg | env->mvien);
2882 }
2883
2884 /*
2885 * If sstc is present, mvip.STIP is not an alias of mip.STIP so clear
2886 * that our in mip returned value.
2887 */
2888 if (cpu->cfg.ext_sstc && (env->priv == PRV_M) &&
2889 get_field(env->menvcfg, MENVCFG_STCE)) {
2890 alias_mask &= ~MIP_STIP;
2891 }
2892
2893 wr_mask_mip = wr_mask & alias_mask & mvip_writable_mask;
2894 wr_mask_mvip = wr_mask & nalias_mask & mvip_writable_mask;
2895
2896 /*
2897 * For bits set in alias_mask, mvip needs to be alias of mip, so forward
2898 * this to rmw_mip.
2899 */
2900 ret = rmw_mip(env, CSR_MIP, &ret_mip, new_val, wr_mask_mip);
2901 if (ret != RISCV_EXCP_NONE) {
2902 return ret;
2903 }
2904
2905 old_mvip = env->mvip;
2906
2907 /*
2908 * Write to mvip. Update only non-alias bits. Alias bits were updated
2909 * in mip in rmw_mip above.
2910 */
2911 if (wr_mask_mvip) {
2912 env->mvip = (env->mvip & ~wr_mask_mvip) | (new_val & wr_mask_mvip);
2913
2914 /*
2915 * Given mvip is separate source from mip, we need to trigger interrupt
2916 * from here separately. Normally this happen from riscv_cpu_update_mip.
2917 */
2918 riscv_cpu_interrupt(env);
2919 }
2920
2921 if (ret_val) {
2922 ret_mip &= alias_mask;
2923 old_mvip &= nalias_mask;
2924
2925 *ret_val = old_mvip | ret_mip;
2926 }
2927
2928 return RISCV_EXCP_NONE;
2929 }
2930
rmw_mvip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2931 static RISCVException rmw_mvip(CPURISCVState *env, int csrno,
2932 target_ulong *ret_val,
2933 target_ulong new_val, target_ulong wr_mask)
2934 {
2935 uint64_t rval;
2936 RISCVException ret;
2937
2938 ret = rmw_mvip64(env, csrno, &rval, new_val, wr_mask);
2939 if (ret_val) {
2940 *ret_val = rval;
2941 }
2942
2943 return ret;
2944 }
2945
rmw_mviph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2946 static RISCVException rmw_mviph(CPURISCVState *env, int csrno,
2947 target_ulong *ret_val,
2948 target_ulong new_val, target_ulong wr_mask)
2949 {
2950 uint64_t rval;
2951 RISCVException ret;
2952
2953 ret = rmw_mvip64(env, csrno, &rval,
2954 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2955 if (ret_val) {
2956 *ret_val = rval >> 32;
2957 }
2958
2959 return ret;
2960 }
2961
2962 /* Supervisor Trap Setup */
read_sstatus_i128(CPURISCVState * env,int csrno,Int128 * val)2963 static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
2964 Int128 *val)
2965 {
2966 uint64_t mask = sstatus_v1_10_mask;
2967 uint64_t sstatus = env->mstatus & mask;
2968 if (env->xl != MXL_RV32 || env->debugger) {
2969 mask |= SSTATUS64_UXL;
2970 }
2971
2972 if (env_archcpu(env)->cfg.ext_zicfilp) {
2973 mask |= SSTATUS_SPELP;
2974 }
2975
2976 *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
2977 return RISCV_EXCP_NONE;
2978 }
2979
read_sstatus(CPURISCVState * env,int csrno,target_ulong * val)2980 static RISCVException read_sstatus(CPURISCVState *env, int csrno,
2981 target_ulong *val)
2982 {
2983 target_ulong mask = (sstatus_v1_10_mask);
2984 if (env->xl != MXL_RV32 || env->debugger) {
2985 mask |= SSTATUS64_UXL;
2986 }
2987
2988 if (env_archcpu(env)->cfg.ext_zicfilp) {
2989 mask |= SSTATUS_SPELP;
2990 }
2991
2992 /* TODO: Use SXL not MXL. */
2993 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
2994 return RISCV_EXCP_NONE;
2995 }
2996
write_sstatus(CPURISCVState * env,int csrno,target_ulong val)2997 static RISCVException write_sstatus(CPURISCVState *env, int csrno,
2998 target_ulong val)
2999 {
3000 target_ulong mask = (sstatus_v1_10_mask);
3001
3002 if (env->xl != MXL_RV32 || env->debugger) {
3003 if ((val & SSTATUS64_UXL) != 0) {
3004 mask |= SSTATUS64_UXL;
3005 }
3006 }
3007
3008 if (env_archcpu(env)->cfg.ext_zicfilp) {
3009 mask |= SSTATUS_SPELP;
3010 }
3011
3012 target_ulong newval = (env->mstatus & ~mask) | (val & mask);
3013 return write_mstatus(env, CSR_MSTATUS, newval);
3014 }
3015
/*
 * 64-bit read-modify-write of vsie.
 *
 * Interrupts delegated via hideleg alias mie (with the VS-level bits
 * shifted down one position in the guest-visible view); interrupts
 * enabled only via hvien are backed by the separate env->vsie storage.
 */
static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    uint64_t alias_mask = (LOCAL_INTERRUPTS | VS_MODE_INTERRUPTS) &
                            env->hideleg;
    uint64_t nalias_mask = LOCAL_INTERRUPTS & (~env->hideleg & env->hvien);
    uint64_t rval, rval_vs, vsbits;
    uint64_t wr_mask_vsie;
    uint64_t wr_mask_mie;
    RISCVException ret;

    /* Bring VS-level bits to correct position */
    vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
    new_val &= ~(VS_MODE_INTERRUPTS >> 1);
    new_val |= vsbits << 1;

    vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
    wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
    wr_mask |= vsbits << 1;

    /* Aliased bits are written to mie; hvien-only bits to env->vsie */
    wr_mask_mie = wr_mask & alias_mask;
    wr_mask_vsie = wr_mask & nalias_mask;

    ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask_mie);

    rval_vs = env->vsie & nalias_mask;
    env->vsie = (env->vsie & ~wr_mask_vsie) | (new_val & wr_mask_vsie);

    if (ret_val) {
        /* Shift VS-level bits back down for the guest-visible value */
        rval &= alias_mask;
        vsbits = rval & VS_MODE_INTERRUPTS;
        rval &= ~VS_MODE_INTERRUPTS;
        *ret_val = rval | (vsbits >> 1) | rval_vs;
    }

    return ret;
}
3054
rmw_vsie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3055 static RISCVException rmw_vsie(CPURISCVState *env, int csrno,
3056 target_ulong *ret_val,
3057 target_ulong new_val, target_ulong wr_mask)
3058 {
3059 uint64_t rval;
3060 RISCVException ret;
3061
3062 ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask);
3063 if (ret_val) {
3064 *ret_val = rval;
3065 }
3066
3067 return ret;
3068 }
3069
rmw_vsieh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3070 static RISCVException rmw_vsieh(CPURISCVState *env, int csrno,
3071 target_ulong *ret_val,
3072 target_ulong new_val, target_ulong wr_mask)
3073 {
3074 uint64_t rval;
3075 RISCVException ret;
3076
3077 ret = rmw_vsie64(env, csrno, &rval,
3078 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3079 if (ret_val) {
3080 *ret_val = rval >> 32;
3081 }
3082
3083 return ret;
3084 }
3085
/*
 * Read-modify-write the 64-bit sie view.  Depending on mideleg/mvien (see
 * the table below), each sie bit is an alias of mie, a separate writable
 * bit backed by env->sie, or read-only zero.
 */
static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
        (~env->mideleg & env->mvien);
    uint64_t alias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & env->mideleg;
    uint64_t sie_mask = wr_mask & nalias_mask;
    RISCVException ret;

    /*
     * mideleg[i]  mvien[i]
     *   0           0      sie[i] read-only zero.
     *   0           1      sie[i] is a separate writable bit.
     *   1           X      sie[i] alias of mie[i].
     *
     * Both alias and non-alias mask remain same for sip except for bits
     * which are zero in both mideleg and mvien.
     */
    if (env->virt_enabled) {
        /* In VS-mode sie is really vsie, unless hvictl.VTI forbids access. */
        if (env->hvictl & HVICTL_VTI) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
        if (ret_val) {
            *ret_val &= alias_mask;
        }
    } else {
        ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & alias_mask);
        if (ret_val) {
            *ret_val &= alias_mask;
            /* Merge in the bits backed by env->sie rather than mie. */
            *ret_val |= env->sie & nalias_mask;
        }

        env->sie = (env->sie & ~sie_mask) | (new_val & sie_mask);
    }

    return ret;
}
3125
rmw_sie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3126 static RISCVException rmw_sie(CPURISCVState *env, int csrno,
3127 target_ulong *ret_val,
3128 target_ulong new_val, target_ulong wr_mask)
3129 {
3130 uint64_t rval;
3131 RISCVException ret;
3132
3133 ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask);
3134 if (ret == RISCV_EXCP_NONE && ret_val) {
3135 *ret_val = rval;
3136 }
3137
3138 return ret;
3139 }
3140
rmw_sieh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3141 static RISCVException rmw_sieh(CPURISCVState *env, int csrno,
3142 target_ulong *ret_val,
3143 target_ulong new_val, target_ulong wr_mask)
3144 {
3145 uint64_t rval;
3146 RISCVException ret;
3147
3148 ret = rmw_sie64(env, csrno, &rval,
3149 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3150 if (ret_val) {
3151 *ret_val = rval >> 32;
3152 }
3153
3154 return ret;
3155 }
3156
/* Read stvec, the supervisor trap-vector base address. */
static RISCVException read_stvec(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->stvec;
    return RISCV_EXCP_NONE;
}
3163
write_stvec(CPURISCVState * env,int csrno,target_ulong val)3164 static RISCVException write_stvec(CPURISCVState *env, int csrno,
3165 target_ulong val)
3166 {
3167 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
3168 if ((val & 3) < 2) {
3169 env->stvec = val;
3170 } else {
3171 qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n");
3172 }
3173 return RISCV_EXCP_NONE;
3174 }
3175
/* Read scounteren, the supervisor counter-enable register. */
static RISCVException read_scounteren(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->scounteren;
    return RISCV_EXCP_NONE;
}
3182
write_scounteren(CPURISCVState * env,int csrno,target_ulong val)3183 static RISCVException write_scounteren(CPURISCVState *env, int csrno,
3184 target_ulong val)
3185 {
3186 RISCVCPU *cpu = env_archcpu(env);
3187
3188 /* WARL register - disable unavailable counters */
3189 env->scounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
3190 COUNTEREN_IR);
3191 return RISCV_EXCP_NONE;
3192 }
3193
3194 /* Supervisor Trap Handling */
/* 128-bit sscratch read: low half from sscratch, high from sscratchh. */
static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
                                         Int128 *val)
{
    *val = int128_make128(env->sscratch, env->sscratchh);
    return RISCV_EXCP_NONE;
}
3201
/* 128-bit sscratch write: split between sscratch (low) and sscratchh. */
static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
                                          Int128 val)
{
    env->sscratch = int128_getlo(val);
    env->sscratchh = int128_gethi(val);
    return RISCV_EXCP_NONE;
}
3209
/* Read sscratch, the supervisor scratch register. */
static RISCVException read_sscratch(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->sscratch;
    return RISCV_EXCP_NONE;
}
3216
/* Write sscratch, the supervisor scratch register. */
static RISCVException write_sscratch(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    env->sscratch = val;
    return RISCV_EXCP_NONE;
}
3223
/* Read sepc, the supervisor exception program counter. */
static RISCVException read_sepc(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->sepc;
    return RISCV_EXCP_NONE;
}
3230
/* Write sepc, the supervisor exception program counter. */
static RISCVException write_sepc(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    env->sepc = val;
    return RISCV_EXCP_NONE;
}
3237
/* Read scause, the supervisor trap cause register. */
static RISCVException read_scause(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->scause;
    return RISCV_EXCP_NONE;
}
3244
/* Write scause, the supervisor trap cause register. */
static RISCVException write_scause(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->scause = val;
    return RISCV_EXCP_NONE;
}
3251
/* Read stval, the supervisor trap value register. */
static RISCVException read_stval(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->stval;
    return RISCV_EXCP_NONE;
}
3258
/* Write stval, the supervisor trap value register. */
static RISCVException write_stval(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->stval = val;
    return RISCV_EXCP_NONE;
}
3265
3266 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
3267 uint64_t *ret_val,
3268 uint64_t new_val, uint64_t wr_mask);
3269
/*
 * Read-modify-write the 64-bit vsip view.  vsip bits come from hvip/mip
 * via rmw_hvip64(); the guest sees the VS* bits shifted down by 1 from
 * their machine-level positions.
 */
static RISCVException rmw_vsip64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t rval, mask = env->hideleg & VS_MODE_INTERRUPTS;
    uint64_t vsbits;

    /* Add virtualized bits into vsip mask. */
    mask |= env->hvien & ~env->hideleg;

    /* Bring VS-level bits to correct position */
    vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
    new_val &= ~(VS_MODE_INTERRUPTS >> 1);
    new_val |= vsbits << 1;
    vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
    wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
    wr_mask |= vsbits << 1;

    ret = rmw_hvip64(env, csrno, &rval, new_val,
                     wr_mask & mask & vsip_writable_mask);
    if (ret_val) {
        /* Shift the VS* bits back down to their guest-visible position. */
        rval &= mask;
        vsbits = rval & VS_MODE_INTERRUPTS;
        rval &= ~VS_MODE_INTERRUPTS;
        *ret_val = rval | (vsbits >> 1);
    }

    return ret;
}
3300
rmw_vsip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3301 static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
3302 target_ulong *ret_val,
3303 target_ulong new_val, target_ulong wr_mask)
3304 {
3305 uint64_t rval;
3306 RISCVException ret;
3307
3308 ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask);
3309 if (ret_val) {
3310 *ret_val = rval;
3311 }
3312
3313 return ret;
3314 }
3315
rmw_vsiph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3316 static RISCVException rmw_vsiph(CPURISCVState *env, int csrno,
3317 target_ulong *ret_val,
3318 target_ulong new_val, target_ulong wr_mask)
3319 {
3320 uint64_t rval;
3321 RISCVException ret;
3322
3323 ret = rmw_vsip64(env, csrno, &rval,
3324 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3325 if (ret_val) {
3326 *ret_val = rval >> 32;
3327 }
3328
3329 return ret;
3330 }
3331
/*
 * Read-modify-write the 64-bit sip view.  In VS-mode the access is
 * redirected to vsip (unless hvictl.VTI forbids it); otherwise sip bits
 * come from mip/mvip, limited to interrupts delegated via mideleg or
 * made visible via mvien.
 */
static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t mask = (env->mideleg | env->mvien) & sip_writable_mask;

    if (env->virt_enabled) {
        if (env->hvictl & HVICTL_VTI) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
    } else {
        ret = rmw_mvip64(env, csrno, ret_val, new_val, wr_mask & mask);
    }

    if (ret_val) {
        /* Hide bits that are neither delegated nor virtualized. */
        *ret_val &= (env->mideleg | env->mvien) &
            (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS);
    }

    return ret;
}
3355
rmw_sip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3356 static RISCVException rmw_sip(CPURISCVState *env, int csrno,
3357 target_ulong *ret_val,
3358 target_ulong new_val, target_ulong wr_mask)
3359 {
3360 uint64_t rval;
3361 RISCVException ret;
3362
3363 ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask);
3364 if (ret_val) {
3365 *ret_val = rval;
3366 }
3367
3368 return ret;
3369 }
3370
rmw_siph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3371 static RISCVException rmw_siph(CPURISCVState *env, int csrno,
3372 target_ulong *ret_val,
3373 target_ulong new_val, target_ulong wr_mask)
3374 {
3375 uint64_t rval;
3376 RISCVException ret;
3377
3378 ret = rmw_sip64(env, csrno, &rval,
3379 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3380 if (ret_val) {
3381 *ret_val = rval >> 32;
3382 }
3383
3384 return ret;
3385 }
3386
3387 /* Supervisor Protection and Translation */
read_satp(CPURISCVState * env,int csrno,target_ulong * val)3388 static RISCVException read_satp(CPURISCVState *env, int csrno,
3389 target_ulong *val)
3390 {
3391 if (!riscv_cpu_cfg(env)->mmu) {
3392 *val = 0;
3393 return RISCV_EXCP_NONE;
3394 }
3395 *val = env->satp;
3396 return RISCV_EXCP_NONE;
3397 }
3398
write_satp(CPURISCVState * env,int csrno,target_ulong val)3399 static RISCVException write_satp(CPURISCVState *env, int csrno,
3400 target_ulong val)
3401 {
3402 if (!riscv_cpu_cfg(env)->mmu) {
3403 return RISCV_EXCP_NONE;
3404 }
3405
3406 env->satp = legalize_xatp(env, env->satp, val);
3407 return RISCV_EXCP_NONE;
3408 }
3409
/*
 * Read vstopi (AIA): report the highest-priority pending VS-level
 * interrupt as an IID/IPRIO pair.  Candidate interrupts are collected
 * from the VS external interrupt (hgeip/IMSIC or hvictl) and from
 * riscv_cpu_vsirq_pending(); the candidate with the lowest numeric
 * priority wins.
 */
static RISCVException read_vstopi(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    int irq, ret;
    target_ulong topei;
    uint64_t vseip, vsgein;
    uint32_t iid, iprio, hviid, hviprio, gein;
    uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS];

    gein = get_field(env->hstatus, HSTATUS_VGEIN);
    hviid = get_field(env->hvictl, HVICTL_IID);
    hviprio = get_field(env->hvictl, HVICTL_IPRIO);

    /* Candidate 1: the VS external interrupt. */
    if (gein) {
        vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
        vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP;
        if (gein <= env->geilen && vseip) {
            siid[scount] = IRQ_S_EXT;
            siprio[scount] = IPRIO_MMAXIPRIO + 1;
            if (env->aia_ireg_rmw_fn[PRV_S]) {
                /*
                 * Call machine specific IMSIC register emulation for
                 * reading TOPEI.
                 */
                ret = env->aia_ireg_rmw_fn[PRV_S](
                        env->aia_ireg_rmw_fn_arg[PRV_S],
                        AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein,
                                      riscv_cpu_mxl_bits(env)),
                        &topei, 0, 0);
                if (!ret && topei) {
                    siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK;
                }
            }
            scount++;
        }
    } else {
        if (hviid == IRQ_S_EXT && hviprio) {
            siid[scount] = IRQ_S_EXT;
            siprio[scount] = hviprio;
            scount++;
        }
    }

    /* Candidate 2: a non-external interrupt, from hvictl or pending set. */
    if (env->hvictl & HVICTL_VTI) {
        if (hviid != IRQ_S_EXT) {
            siid[scount] = hviid;
            siprio[scount] = hviprio;
            scount++;
        }
    } else {
        irq = riscv_cpu_vsirq_pending(env);
        if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) {
            siid[scount] = irq;
            siprio[scount] = env->hviprio[irq];
            scount++;
        }
    }

    /* Pick the candidate with the lowest (i.e. best) numeric priority. */
    iid = 0;
    iprio = UINT_MAX;
    for (s = 0; s < scount; s++) {
        if (siprio[s] < iprio) {
            iid = siid[s];
            iprio = siprio[s];
        }
    }

    if (iid) {
        if (env->hvictl & HVICTL_IPRIOM) {
            if (iprio > IPRIO_MMAXIPRIO) {
                iprio = IPRIO_MMAXIPRIO;
            }
            if (!iprio) {
                if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) {
                    iprio = IPRIO_MMAXIPRIO;
                }
            }
        } else {
            iprio = 1;
        }
    } else {
        iprio = 0;
    }

    *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT;
    *val |= iprio;

    return RISCV_EXCP_NONE;
}
3499
read_stopi(CPURISCVState * env,int csrno,target_ulong * val)3500 static RISCVException read_stopi(CPURISCVState *env, int csrno,
3501 target_ulong *val)
3502 {
3503 int irq;
3504 uint8_t iprio;
3505
3506 if (env->virt_enabled) {
3507 return read_vstopi(env, CSR_VSTOPI, val);
3508 }
3509
3510 irq = riscv_cpu_sirq_pending(env);
3511 if (irq <= 0 || irq > 63) {
3512 *val = 0;
3513 } else {
3514 iprio = env->siprio[irq];
3515 if (!iprio) {
3516 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) {
3517 iprio = IPRIO_MMAXIPRIO;
3518 }
3519 }
3520 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
3521 *val |= iprio;
3522 }
3523
3524 return RISCV_EXCP_NONE;
3525 }
3526
3527 /* Hypervisor Extensions */
read_hstatus(CPURISCVState * env,int csrno,target_ulong * val)3528 static RISCVException read_hstatus(CPURISCVState *env, int csrno,
3529 target_ulong *val)
3530 {
3531 *val = env->hstatus;
3532 if (riscv_cpu_mxl(env) != MXL_RV32) {
3533 /* We only support 64-bit VSXL */
3534 *val = set_field(*val, HSTATUS_VSXL, 2);
3535 }
3536 /* We only support little endian */
3537 *val = set_field(*val, HSTATUS_VSBE, 0);
3538 return RISCV_EXCP_NONE;
3539 }
3540
write_hstatus(CPURISCVState * env,int csrno,target_ulong val)3541 static RISCVException write_hstatus(CPURISCVState *env, int csrno,
3542 target_ulong val)
3543 {
3544 env->hstatus = val;
3545 if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
3546 qemu_log_mask(LOG_UNIMP,
3547 "QEMU does not support mixed HSXLEN options.");
3548 }
3549 if (get_field(val, HSTATUS_VSBE) != 0) {
3550 qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests.");
3551 }
3552 return RISCV_EXCP_NONE;
3553 }
3554
/* Read hedeleg, the hypervisor exception delegation register. */
static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->hedeleg;
    return RISCV_EXCP_NONE;
}
3561
/* Write hedeleg; only VS-delegable exceptions may be set. */
static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    env->hedeleg = val & vs_delegable_excps;
    return RISCV_EXCP_NONE;
}
3568
read_hedelegh(CPURISCVState * env,int csrno,target_ulong * val)3569 static RISCVException read_hedelegh(CPURISCVState *env, int csrno,
3570 target_ulong *val)
3571 {
3572 RISCVException ret;
3573 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
3574 if (ret != RISCV_EXCP_NONE) {
3575 return ret;
3576 }
3577
3578 /* Reserved, now read zero */
3579 *val = 0;
3580 return RISCV_EXCP_NONE;
3581 }
3582
write_hedelegh(CPURISCVState * env,int csrno,target_ulong val)3583 static RISCVException write_hedelegh(CPURISCVState *env, int csrno,
3584 target_ulong val)
3585 {
3586 RISCVException ret;
3587 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
3588 if (ret != RISCV_EXCP_NONE) {
3589 return ret;
3590 }
3591
3592 /* Reserved, now write ignore */
3593 return RISCV_EXCP_NONE;
3594 }
3595
rmw_hvien64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)3596 static RISCVException rmw_hvien64(CPURISCVState *env, int csrno,
3597 uint64_t *ret_val,
3598 uint64_t new_val, uint64_t wr_mask)
3599 {
3600 uint64_t mask = wr_mask & hvien_writable_mask;
3601
3602 if (ret_val) {
3603 *ret_val = env->hvien;
3604 }
3605
3606 env->hvien = (env->hvien & ~mask) | (new_val & mask);
3607
3608 return RISCV_EXCP_NONE;
3609 }
3610
rmw_hvien(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3611 static RISCVException rmw_hvien(CPURISCVState *env, int csrno,
3612 target_ulong *ret_val,
3613 target_ulong new_val, target_ulong wr_mask)
3614 {
3615 uint64_t rval;
3616 RISCVException ret;
3617
3618 ret = rmw_hvien64(env, csrno, &rval, new_val, wr_mask);
3619 if (ret_val) {
3620 *ret_val = rval;
3621 }
3622
3623 return ret;
3624 }
3625
rmw_hvienh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3626 static RISCVException rmw_hvienh(CPURISCVState *env, int csrno,
3627 target_ulong *ret_val,
3628 target_ulong new_val, target_ulong wr_mask)
3629 {
3630 uint64_t rval;
3631 RISCVException ret;
3632
3633 ret = rmw_hvien64(env, csrno, &rval,
3634 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3635 if (ret_val) {
3636 *ret_val = rval >> 32;
3637 }
3638
3639 return ret;
3640 }
3641
rmw_hideleg64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)3642 static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno,
3643 uint64_t *ret_val,
3644 uint64_t new_val, uint64_t wr_mask)
3645 {
3646 uint64_t mask = wr_mask & vs_delegable_ints;
3647
3648 if (ret_val) {
3649 *ret_val = env->hideleg & vs_delegable_ints;
3650 }
3651
3652 env->hideleg = (env->hideleg & ~mask) | (new_val & mask);
3653 return RISCV_EXCP_NONE;
3654 }
3655
rmw_hideleg(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3656 static RISCVException rmw_hideleg(CPURISCVState *env, int csrno,
3657 target_ulong *ret_val,
3658 target_ulong new_val, target_ulong wr_mask)
3659 {
3660 uint64_t rval;
3661 RISCVException ret;
3662
3663 ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask);
3664 if (ret_val) {
3665 *ret_val = rval;
3666 }
3667
3668 return ret;
3669 }
3670
rmw_hidelegh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3671 static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno,
3672 target_ulong *ret_val,
3673 target_ulong new_val, target_ulong wr_mask)
3674 {
3675 uint64_t rval;
3676 RISCVException ret;
3677
3678 ret = rmw_hideleg64(env, csrno, &rval,
3679 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3680 if (ret_val) {
3681 *ret_val = rval >> 32;
3682 }
3683
3684 return ret;
3685 }
3686
3687 /*
3688 * The function is written for two use-cases:
3689 * 1- To access hvip csr as is for HS-mode access.
3690 * 2- To access vsip as a combination of hvip, and mip for vs-mode.
3691 *
3692 * Both report bits 2, 6, 10 and 13:63.
3693 * vsip needs to be read-only zero when both hideleg[i] and
3694 * hvien[i] are zero.
3695 */
static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t old_hvip;
    uint64_t ret_mip;

    /*
     * For bits 10, 6 and 2, vsip[i] is an alias of hip[i]. These bits are
     * present in hip, hvip and mip. Where mip[i] is alias of hip[i] and hvip[i]
     * is OR'ed in hip[i] to inject virtual interrupts from hypervisor. These
     * bits are actually being maintained in mip so we read them from there.
     * This way we have a single source of truth and allows for easier
     * implementation.
     *
     * For bits 13:63 we have:
     *
     * hideleg[i]  hvien[i]
     *   0           0      No delegation. vsip[i] readonly zero.
     *   0           1      vsip[i] is alias of hvip[i], sip bypassed.
     *   1           X      vsip[i] is alias of sip[i], hvip bypassed.
     *
     * alias_mask denotes the bits that come from sip (mip here given we
     * maintain all bits there). nalias_mask denotes bits that come from
     * hvip.
     */
    uint64_t alias_mask = (env->hideleg | ~env->hvien) | VS_MODE_INTERRUPTS;
    uint64_t nalias_mask = (~env->hideleg & env->hvien);
    uint64_t wr_mask_hvip;
    uint64_t wr_mask_mip;

    /*
     * Both alias and non-alias mask remain same for vsip except:
     * 1- For VS* bits if they are zero in hideleg.
     * 2- For 13:63 bits if they are zero in both hideleg and hvien.
     */
    if (csrno == CSR_VSIP) {
        /* zero-out VS* bits that are not delegated to VS mode. */
        alias_mask &= (env->hideleg | ~VS_MODE_INTERRUPTS);

        /*
         * zero-out 13:63 bits that are zero in both hideleg and hvien.
         * nalias_mask mask can not contain any VS* bits so only second
         * condition applies on it.
         */
        nalias_mask &= (env->hideleg | env->hvien);
        alias_mask &= (env->hideleg | env->hvien);
    }

    /* Split the write between the hvip-backed and the mip-backed bits. */
    wr_mask_hvip = wr_mask & nalias_mask & hvip_writable_mask;
    wr_mask_mip = wr_mask & alias_mask & hvip_writable_mask;

    /* Aliased bits, bits 10, 6, 2 need to come from mip. */
    ret = rmw_mip64(env, csrno, &ret_mip, new_val, wr_mask_mip);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* Snapshot hvip before the update: reads return the old value. */
    old_hvip = env->hvip;

    if (wr_mask_hvip) {
        env->hvip = (env->hvip & ~wr_mask_hvip) | (new_val & wr_mask_hvip);

        /*
         * Given hvip is separate source from mip, we need to trigger interrupt
         * from here separately. Normally this happen from riscv_cpu_update_mip.
         */
        riscv_cpu_interrupt(env);
    }

    if (ret_val) {
        /* Only take VS* bits from mip. */
        ret_mip &= alias_mask;

        /* Take in non-delegated 13:63 bits from hvip. */
        old_hvip &= nalias_mask;

        *ret_val = ret_mip | old_hvip;
    }

    return ret;
}
3779
rmw_hvip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3780 static RISCVException rmw_hvip(CPURISCVState *env, int csrno,
3781 target_ulong *ret_val,
3782 target_ulong new_val, target_ulong wr_mask)
3783 {
3784 uint64_t rval;
3785 RISCVException ret;
3786
3787 ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask);
3788 if (ret_val) {
3789 *ret_val = rval;
3790 }
3791
3792 return ret;
3793 }
3794
rmw_hviph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3795 static RISCVException rmw_hviph(CPURISCVState *env, int csrno,
3796 target_ulong *ret_val,
3797 target_ulong new_val, target_ulong wr_mask)
3798 {
3799 uint64_t rval;
3800 RISCVException ret;
3801
3802 ret = rmw_hvip64(env, csrno, &rval,
3803 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3804 if (ret_val) {
3805 *ret_val = rval >> 32;
3806 }
3807
3808 return ret;
3809 }
3810
rmw_hip(CPURISCVState * env,int csrno,target_ulong * ret_value,target_ulong new_value,target_ulong write_mask)3811 static RISCVException rmw_hip(CPURISCVState *env, int csrno,
3812 target_ulong *ret_value,
3813 target_ulong new_value, target_ulong write_mask)
3814 {
3815 int ret = rmw_mip(env, csrno, ret_value, new_value,
3816 write_mask & hip_writable_mask);
3817
3818 if (ret_value) {
3819 *ret_value &= HS_MODE_INTERRUPTS;
3820 }
3821 return ret;
3822 }
3823
rmw_hie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3824 static RISCVException rmw_hie(CPURISCVState *env, int csrno,
3825 target_ulong *ret_val,
3826 target_ulong new_val, target_ulong wr_mask)
3827 {
3828 uint64_t rval;
3829 RISCVException ret;
3830
3831 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS);
3832 if (ret_val) {
3833 *ret_val = rval & HS_MODE_INTERRUPTS;
3834 }
3835
3836 return ret;
3837 }
3838
/* Read hcounteren, the hypervisor counter-enable register. */
static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->hcounteren;
    return RISCV_EXCP_NONE;
}
3845
write_hcounteren(CPURISCVState * env,int csrno,target_ulong val)3846 static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
3847 target_ulong val)
3848 {
3849 RISCVCPU *cpu = env_archcpu(env);
3850
3851 /* WARL register - disable unavailable counters */
3852 env->hcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
3853 COUNTEREN_IR);
3854 return RISCV_EXCP_NONE;
3855 }
3856
read_hgeie(CPURISCVState * env,int csrno,target_ulong * val)3857 static RISCVException read_hgeie(CPURISCVState *env, int csrno,
3858 target_ulong *val)
3859 {
3860 if (val) {
3861 *val = env->hgeie;
3862 }
3863 return RISCV_EXCP_NONE;
3864 }
3865
/* Write hgeie.  Bit 0 and bits above GEILEN are hardwired to zero. */
static RISCVException write_hgeie(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    /* Only GEILEN:1 bits implemented and BIT0 is never implemented */
    val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
    env->hgeie = val;
    /* Update mip.SGEIP bit */
    riscv_cpu_update_mip(env, MIP_SGEIP,
                         BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    return RISCV_EXCP_NONE;
}
3877
/* Read htval, the hypervisor trap value register. */
static RISCVException read_htval(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->htval;
    return RISCV_EXCP_NONE;
}
3884
/* Write htval, the hypervisor trap value register. */
static RISCVException write_htval(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->htval = val;
    return RISCV_EXCP_NONE;
}
3891
/* Read htinst, the hypervisor trap instruction register. */
static RISCVException read_htinst(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->htinst;
    return RISCV_EXCP_NONE;
}
3898
/* Writes to htinst are silently ignored. */
static RISCVException write_htinst(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    return RISCV_EXCP_NONE;
}
3904
/*
 * Read hgeip.  val may be NULL here, unlike most read handlers —
 * NOTE(review): presumably a caller probes without wanting the value.
 */
static RISCVException read_hgeip(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    if (val) {
        *val = env->hgeip;
    }
    return RISCV_EXCP_NONE;
}
3913
/* Read hgatp, the hypervisor guest address translation register. */
static RISCVException read_hgatp(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->hgatp;
    return RISCV_EXCP_NONE;
}
3920
/* Write hgatp, legalizing the mode/PPN fields as for satp. */
static RISCVException write_hgatp(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->hgatp = legalize_xatp(env, env->hgatp, val);
    return RISCV_EXCP_NONE;
}
3927
/* Read htimedelta; illegal when the machine provides no rdtime callback. */
static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = env->htimedelta;
    return RISCV_EXCP_NONE;
}
3938
/*
 * Write htimedelta (the full register on RV64, only the low 32 bits on
 * RV32).  Illegal when the machine provides no rdtime callback.
 */
static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
    } else {
        env->htimedelta = val;
    }

    /* The Sstc VS timer depends on htimedelta, so reprogram it. */
    if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) {
        riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
                                  env->htimedelta, MIP_VSTIP);
    }

    return RISCV_EXCP_NONE;
}
3959
/* Read htimedeltah (RV32): the upper 32 bits of htimedelta. */
static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
                                       target_ulong *val)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = env->htimedelta >> 32;
    return RISCV_EXCP_NONE;
}
3970
/* Write htimedeltah (RV32): the upper 32 bits of htimedelta. */
static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
                                        target_ulong val)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);

    /* The Sstc VS timer depends on htimedelta, so reprogram it. */
    if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) {
        riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
                                  env->htimedelta, MIP_VSTIP);
    }

    return RISCV_EXCP_NONE;
}
3987
/* Read hvictl, the hypervisor virtual interrupt control register (AIA). */
static RISCVException read_hvictl(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->hvictl;
    return RISCV_EXCP_NONE;
}
3994
/* Write hvictl, masking out non-implemented bits. */
static RISCVException write_hvictl(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->hvictl = val & HVICTL_VALID_MASK;
    return RISCV_EXCP_NONE;
}
4001
/*
 * Read one hviprioX CSR: pack 4 (RV32) or 8 (RV64) one-byte IRQ
 * priorities from @iprio, starting at @first_index, into @val.
 * Entries that are read-only zero are skipped.
 */
static RISCVException read_hvipriox(CPURISCVState *env, int first_index,
                                    uint8_t *iprio, target_ulong *val)
{
    int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);

    /* First index has to be a multiple of number of irqs per register */
    if (first_index % num_irqs) {
        return (env->virt_enabled) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }

    /* Fill-up return value */
    *val = 0;
    for (i = 0; i < num_irqs; i++) {
        if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
            continue;
        }
        if (rdzero) {
            continue;
        }
        *val |= ((target_ulong)iprio[irq]) << (i * 8);
    }

    return RISCV_EXCP_NONE;
}
4027
/*
 * Write one hviprioX CSR: unpack 4 (RV32) or 8 (RV64) one-byte IRQ
 * priorities from @val into @iprio, starting at @first_index.
 * Entries that are read-only zero are forced to 0.
 */
static RISCVException write_hvipriox(CPURISCVState *env, int first_index,
                                     uint8_t *iprio, target_ulong val)
{
    int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);

    /* First index has to be a multiple of number of irqs per register */
    if (first_index % num_irqs) {
        return (env->virt_enabled) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }

    /* Fill-up priority array */
    for (i = 0; i < num_irqs; i++) {
        if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
            continue;
        }
        if (rdzero) {
            iprio[irq] = 0;
        } else {
            iprio[irq] = (val >> (i * 8)) & 0xff;
        }
    }

    return RISCV_EXCP_NONE;
}
4053
/* hviprio1: priority entries starting at index 0. */
static RISCVException read_hviprio1(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    return read_hvipriox(env, 0, env->hviprio, val);
}
4059
write_hviprio1(CPURISCVState * env,int csrno,target_ulong val)4060 static RISCVException write_hviprio1(CPURISCVState *env, int csrno,
4061 target_ulong val)
4062 {
4063 return write_hvipriox(env, 0, env->hviprio, val);
4064 }
4065
read_hviprio1h(CPURISCVState * env,int csrno,target_ulong * val)4066 static RISCVException read_hviprio1h(CPURISCVState *env, int csrno,
4067 target_ulong *val)
4068 {
4069 return read_hvipriox(env, 4, env->hviprio, val);
4070 }
4071
write_hviprio1h(CPURISCVState * env,int csrno,target_ulong val)4072 static RISCVException write_hviprio1h(CPURISCVState *env, int csrno,
4073 target_ulong val)
4074 {
4075 return write_hvipriox(env, 4, env->hviprio, val);
4076 }
4077
read_hviprio2(CPURISCVState * env,int csrno,target_ulong * val)4078 static RISCVException read_hviprio2(CPURISCVState *env, int csrno,
4079 target_ulong *val)
4080 {
4081 return read_hvipriox(env, 8, env->hviprio, val);
4082 }
4083
write_hviprio2(CPURISCVState * env,int csrno,target_ulong val)4084 static RISCVException write_hviprio2(CPURISCVState *env, int csrno,
4085 target_ulong val)
4086 {
4087 return write_hvipriox(env, 8, env->hviprio, val);
4088 }
4089
read_hviprio2h(CPURISCVState * env,int csrno,target_ulong * val)4090 static RISCVException read_hviprio2h(CPURISCVState *env, int csrno,
4091 target_ulong *val)
4092 {
4093 return read_hvipriox(env, 12, env->hviprio, val);
4094 }
4095
write_hviprio2h(CPURISCVState * env,int csrno,target_ulong val)4096 static RISCVException write_hviprio2h(CPURISCVState *env, int csrno,
4097 target_ulong val)
4098 {
4099 return write_hvipriox(env, 12, env->hviprio, val);
4100 }
4101
4102 /* Virtual CSR Registers */
static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->vsstatus;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    uint64_t mask = (target_ulong)-1;
    /*
     * UXL is WARL: if the guest writes UXL as zero, exclude it from the
     * update mask so the previously stored UXL value is preserved.
     */
    if ((val & VSSTATUS64_UXL) == 0) {
        mask &= ~VSSTATUS64_UXL;
    }
    env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
    return RISCV_EXCP_NONE;
}
4120
static RISCVException read_vstvec(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstvec;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vstvec(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
    if ((val & 3) < 2) {
        env->vstvec = val;
    } else {
        /* Writes selecting a reserved mode are dropped, keeping old value */
        qemu_log_mask(LOG_UNIMP, "CSR_VSTVEC: reserved mode not supported\n");
    }
    return RISCV_EXCP_NONE;
}
4139
/* Plain accessors for the remaining virtual-supervisor (VS) trap CSRs */
static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->vsscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    env->vsscratch = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vsepc(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vsepc;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsepc(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->vsepc = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vscause(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->vscause;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vscause(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    env->vscause = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vstval(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstval;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vstval(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->vstval = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vsatp(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vsatp;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsatp(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    /* vsatp is WARL: legalize the new value against the current one */
    env->vsatp = legalize_xatp(env, env->vsatp, val);
    return RISCV_EXCP_NONE;
}
4209
/* Plain accessors for the hypervisor-related machine trap CSRs */
static RISCVException read_mtval2(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mtval2;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtval2(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->mtval2 = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mtinst(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mtinst;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtinst(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->mtinst = val;
    return RISCV_EXCP_NONE;
}
4237
4238 /* Physical Memory Protection */
/* Thin CSR wrappers around the PMP helpers; the register index is
 * derived from the CSR number's offset from the first CSR in the group. */
static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = mseccfg_csr_read(env);
    return RISCV_EXCP_NONE;
}

static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    mseccfg_csr_write(env, val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    uint32_t reg_index = csrno - CSR_PMPCFG0;

    *val = pmpcfg_csr_read(env, reg_index);
    return RISCV_EXCP_NONE;
}

static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    uint32_t reg_index = csrno - CSR_PMPCFG0;

    pmpcfg_csr_write(env, reg_index, val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
    return RISCV_EXCP_NONE;
}

static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
    return RISCV_EXCP_NONE;
}
4284
/* Debug trigger select CSR: wrappers around the debug helpers */
static RISCVException read_tselect(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = tselect_csr_read(env);
    return RISCV_EXCP_NONE;
}

static RISCVException write_tselect(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    tselect_csr_write(env, val);
    return RISCV_EXCP_NONE;
}
4298
read_tdata(CPURISCVState * env,int csrno,target_ulong * val)4299 static RISCVException read_tdata(CPURISCVState *env, int csrno,
4300 target_ulong *val)
4301 {
4302 /* return 0 in tdata1 to end the trigger enumeration */
4303 if (env->trigger_cur >= RV_MAX_TRIGGERS && csrno == CSR_TDATA1) {
4304 *val = 0;
4305 return RISCV_EXCP_NONE;
4306 }
4307
4308 if (!tdata_available(env, csrno - CSR_TDATA1)) {
4309 return RISCV_EXCP_ILLEGAL_INST;
4310 }
4311
4312 *val = tdata_csr_read(env, csrno - CSR_TDATA1);
4313 return RISCV_EXCP_NONE;
4314 }
4315
static RISCVException write_tdata(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    /* Writes to an unavailable tdata register are illegal */
    if (!tdata_available(env, csrno - CSR_TDATA1)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    tdata_csr_write(env, csrno - CSR_TDATA1, val);
    return RISCV_EXCP_NONE;
}

/* tinfo: read-only description of the selected trigger */
static RISCVException read_tinfo(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = tinfo_csr_read(env);
    return RISCV_EXCP_NONE;
}

static RISCVException read_mcontext(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mcontext;
    return RISCV_EXCP_NONE;
}
4340
write_mcontext(CPURISCVState * env,int csrno,target_ulong val)4341 static RISCVException write_mcontext(CPURISCVState *env, int csrno,
4342 target_ulong val)
4343 {
4344 bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;
4345 int32_t mask;
4346
4347 if (riscv_has_ext(env, RVH)) {
4348 /* Spec suggest 7-bit for RV32 and 14-bit for RV64 w/ H extension */
4349 mask = rv32 ? MCONTEXT32_HCONTEXT : MCONTEXT64_HCONTEXT;
4350 } else {
4351 /* Spec suggest 6-bit for RV32 and 13-bit for RV64 w/o H extension */
4352 mask = rv32 ? MCONTEXT32 : MCONTEXT64;
4353 }
4354
4355 env->mcontext = val & mask;
4356 return RISCV_EXCP_NONE;
4357 }
4358
4359 /*
4360 * Functions to access Pointer Masking feature registers
4361 * We have to check if current priv lvl could modify
4362 * csr in given mode
4363 */
/*
 * Returns true if the current privilege level is NOT allowed to modify
 * its own pointer-masking CSRs (i.e. the relevant pm.current bit in
 * mmte is clear). Debugger accesses and accesses from a higher
 * privilege level are always allowed.
 */
static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
{
    /* Bits [9:8] of the CSR number encode its privilege level */
    int csr_priv = get_field(csrno, 0x300);
    int pm_current;

    if (env->debugger) {
        return false;
    }
    /*
     * If priv lvls differ that means we're accessing csr from higher priv lvl,
     * so allow the access
     */
    if (env->priv != csr_priv) {
        return false;
    }
    switch (env->priv) {
    case PRV_M:
        pm_current = get_field(env->mmte, M_PM_CURRENT);
        break;
    case PRV_S:
        pm_current = get_field(env->mmte, S_PM_CURRENT);
        break;
    case PRV_U:
        pm_current = get_field(env->mmte, U_PM_CURRENT);
        break;
    default:
        g_assert_not_reached();
    }
    /* It's same priv lvl, so we allow to modify csr only if pm.current==1 */
    return !pm_current;
}
4395
/*
 * mmte/smte/umte all alias the single backing field env->mmte; the s/u
 * variants expose a masked view and funnel writes through write_mmte.
 */
static RISCVException read_mmte(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->mmte & MMTE_MASK;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mmte(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    uint64_t mstatus;
    /* Drop writes to WPRI (reserved) bits, logging any violation */
    target_ulong wpri_val = val & MMTE_MASK;

    if (val != wpri_val) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s"
                      TARGET_FMT_lx "\n", "MMTE: WPRI violation written 0x",
                      val, "vs expected 0x", wpri_val);
    }
    /* for machine mode pm.current is hardwired to 1 */
    wpri_val |= MMTE_M_PM_CURRENT;

    /* hardwiring pm.instruction bit to 0, since it's not supported yet */
    wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
    env->mmte = wpri_val | EXT_STATUS_DIRTY;
    /* Recompute the effective pointer-masking mask/base for this mode */
    riscv_cpu_update_mask(env);

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}

static RISCVException read_smte(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->mmte & SMTE_MASK;
    return RISCV_EXCP_NONE;
}

static RISCVException write_smte(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    target_ulong wpri_val = val & SMTE_MASK;

    if (val != wpri_val) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s"
                      TARGET_FMT_lx "\n", "SMTE: WPRI violation written 0x",
                      val, "vs expected 0x", wpri_val);
    }

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }

    /* Merge with the bits outside SMTE's view, then write through mmte */
    wpri_val |= (env->mmte & ~SMTE_MASK);
    write_mmte(env, csrno, wpri_val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_umte(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->mmte & UMTE_MASK;
    return RISCV_EXCP_NONE;
}

static RISCVException write_umte(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    target_ulong wpri_val = val & UMTE_MASK;

    if (val != wpri_val) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s"
                      TARGET_FMT_lx "\n", "UMTE: WPRI violation written 0x",
                      val, "vs expected 0x", wpri_val);
    }

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }

    /* Merge with the bits outside UMTE's view, then write through mmte */
    wpri_val |= (env->mmte & ~UMTE_MASK);
    write_mmte(env, csrno, wpri_val);
    return RISCV_EXCP_NONE;
}
4482
/* Pointer-masking mask CSRs for M/S/U modes. Writing updates the cached
 * cur_pmmask when the written mode is the current address-translation
 * mode and pointer masking is enabled for it. */
static RISCVException read_mpmmask(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mpmmask;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    env->mpmmask = val;
    /*
     * NOTE(review): unlike spmmask/upmmask there is no 32-bit truncation
     * here — presumably M-mode always runs at full MXLEN; confirm.
     */
    if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
        env->cur_pmmask = val;
    }
    env->mmte |= EXT_STATUS_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}

static RISCVException read_spmmask(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->spmmask;
    return RISCV_EXCP_NONE;
}

static RISCVException write_spmmask(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }
    env->spmmask = val;
    if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
        env->cur_pmmask = val;
        /* Truncate the effective mask when S-mode XLEN is 32 */
        if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
            env->cur_pmmask &= UINT32_MAX;
        }
    }
    env->mmte |= EXT_STATUS_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}

static RISCVException read_upmmask(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->upmmask;
    return RISCV_EXCP_NONE;
}

static RISCVException write_upmmask(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }
    env->upmmask = val;
    if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
        env->cur_pmmask = val;
        /* Truncate the effective mask when U-mode XLEN is 32 */
        if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
            env->cur_pmmask &= UINT32_MAX;
        }
    }
    env->mmte |= EXT_STATUS_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}
4568
/* Pointer-masking base CSRs for M/S/U modes; mirrors the *pmmask
 * handlers above but updates cur_pmbase instead of cur_pmmask. */
static RISCVException read_mpmbase(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mpmbase;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    env->mpmbase = val;
    if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
        env->cur_pmbase = val;
    }
    env->mmte |= EXT_STATUS_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}

static RISCVException read_spmbase(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->spmbase;
    return RISCV_EXCP_NONE;
}

static RISCVException write_spmbase(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }
    env->spmbase = val;
    if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
        env->cur_pmbase = val;
        /* Truncate the effective base when S-mode XLEN is 32 */
        if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
            env->cur_pmbase &= UINT32_MAX;
        }
    }
    env->mmte |= EXT_STATUS_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}

static RISCVException read_upmbase(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->upmbase;
    return RISCV_EXCP_NONE;
}

static RISCVException write_upmbase(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }
    env->upmbase = val;
    if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
        env->cur_pmbase = val;
        /* Truncate the effective base when U-mode XLEN is 32 */
        if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
            env->cur_pmbase &= UINT32_MAX;
        }
    }
    env->mmte |= EXT_STATUS_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}
4654
4655 #endif
4656
4657 /* Crypto Extension */
/*
 * Produce a new value for the Zkr seed CSR: 16 bits of entropy with the
 * ES16 status, or OPST_DEAD if the host entropy source failed.
 * Fixes: use sizeof instead of the magic constant 2, and terminate the
 * log message with a newline as QEMU log lines should be.
 */
target_ulong riscv_new_csr_seed(target_ulong new_value,
                                target_ulong write_mask)
{
    uint16_t random_v;
    Error *random_e = NULL;
    int random_r;
    target_ulong rval;

    random_r = qemu_guest_getrandom(&random_v, sizeof(random_v), &random_e);
    if (unlikely(random_r < 0)) {
        /*
         * Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return a
         * failure indication to the guest. There is no reason
         * we know to expect the failure to be transitory, so
         * indicate DEAD to avoid having the guest spin on WAIT.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s\n",
                      __func__, error_get_pretty(random_e));
        error_free(random_e);
        rval = SEED_OPST_DEAD;
    } else {
        rval = random_v | SEED_OPST_ES16;
    }

    return rval;
}
4685
rmw_seed(CPURISCVState * env,int csrno,target_ulong * ret_value,target_ulong new_value,target_ulong write_mask)4686 static RISCVException rmw_seed(CPURISCVState *env, int csrno,
4687 target_ulong *ret_value,
4688 target_ulong new_value,
4689 target_ulong write_mask)
4690 {
4691 target_ulong rval;
4692
4693 rval = riscv_new_csr_seed(new_value, write_mask);
4694
4695 if (ret_value) {
4696 *ret_value = rval;
4697 }
4698
4699 return RISCV_EXCP_NONE;
4700 }
4701
4702 /*
4703 * riscv_csrrw - read and/or update control and status register
4704 *
4705 * csrr <-> riscv_csrrw(env, csrno, ret_value, 0, 0);
4706 * csrrw <-> riscv_csrrw(env, csrno, ret_value, value, -1);
4707 * csrrs <-> riscv_csrrw(env, csrno, ret_value, -1, value);
4708 * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value);
4709 */
4710
/*
 * Common access checks for any CSR access. The order of the checks is
 * significant: existence/version checks and the read-only check come
 * before the predicate so that the right exception type is raised.
 * Returns RISCV_EXCP_NONE when the access may proceed.
 */
static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
                                               int csrno,
                                               bool write)
{
    /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
    /* CSR numbers with bits [11:10] == 0b11 are read-only by convention */
    bool read_only = get_field(csrno, 0xC00) == 3;
    int csr_min_priv = csr_ops[csrno].min_priv_ver;

    /* ensure the CSR extension is enabled */
    if (!riscv_cpu_cfg(env)->ext_zicsr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* ensure CSR is implemented by checking predicate */
    if (!csr_ops[csrno].predicate) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* privileged spec version check */
    if (env->priv_ver < csr_min_priv) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* read / write check */
    if (write && read_only) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /*
     * The predicate() not only does existence check but also does some
     * access control check which triggers for example virtual instruction
     * exception in some cases. When writing read-only CSRs in those cases
     * illegal instruction exception should be triggered instead of virtual
     * instruction exception. Hence this comes after the read / write check.
     */
    RISCVException ret = csr_ops[csrno].predicate(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

#if !defined(CONFIG_USER_ONLY)
    int csr_priv, effective_priv = env->priv;

    if (riscv_has_ext(env, RVH) && env->priv == PRV_S &&
        !env->virt_enabled) {
        /*
         * We are in HS mode. Add 1 to the effective privilege level to
         * allow us to access the Hypervisor CSRs.
         */
        effective_priv++;
    }

    /* Bits [9:8] of the CSR number encode the minimum privilege level */
    csr_priv = get_field(csrno, 0x300);
    if (!env->debugger && (effective_priv < csr_priv)) {
        if (csr_priv == (PRV_S + 1) && env->virt_enabled) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        return RISCV_EXCP_ILLEGAL_INST;
    }
#endif
    return RISCV_EXCP_NONE;
}
4773
/*
 * Perform the actual 64/32-bit CSR read-modify-write after access checks
 * have passed: prefer a combined op() handler, otherwise read the old
 * value (when requested), merge in new_value under write_mask, and write.
 */
static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
                                       target_ulong *ret_value,
                                       target_ulong new_value,
                                       target_ulong write_mask)
{
    RISCVException ret;
    target_ulong old_value = 0;

    /* execute combined read/write operation if it exists */
    if (csr_ops[csrno].op) {
        return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
    }

    /*
     * ret_value == NULL means that rd=x0 and we're coming from helper_csrw()
     * and we can't throw side effects caused by CSR reads.
     */
    if (ret_value) {
        /* if no accessor exists then return failure */
        if (!csr_ops[csrno].read) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
        /* read old value */
        ret = csr_ops[csrno].read(env, csrno, &old_value);
        if (ret != RISCV_EXCP_NONE) {
            return ret;
        }
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (write_mask) {
        new_value = (old_value & ~write_mask) | (new_value & write_mask);
        if (csr_ops[csrno].write) {
            ret = csr_ops[csrno].write(env, csrno, new_value);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}
4821
riscv_csrr(CPURISCVState * env,int csrno,target_ulong * ret_value)4822 RISCVException riscv_csrr(CPURISCVState *env, int csrno,
4823 target_ulong *ret_value)
4824 {
4825 RISCVException ret = riscv_csrrw_check(env, csrno, false);
4826 if (ret != RISCV_EXCP_NONE) {
4827 return ret;
4828 }
4829
4830 return riscv_csrrw_do64(env, csrno, ret_value, 0, 0);
4831 }
4832
riscv_csrrw(CPURISCVState * env,int csrno,target_ulong * ret_value,target_ulong new_value,target_ulong write_mask)4833 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
4834 target_ulong *ret_value,
4835 target_ulong new_value, target_ulong write_mask)
4836 {
4837 RISCVException ret = riscv_csrrw_check(env, csrno, true);
4838 if (ret != RISCV_EXCP_NONE) {
4839 return ret;
4840 }
4841
4842 return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
4843 }
4844
/*
 * 128-bit CSR read-modify-write. Callers must have verified that a
 * read128() handler exists. Falls back to the 64-bit write() handler
 * for registers that have no dedicated write128().
 */
static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
                                        Int128 *ret_value,
                                        Int128 new_value,
                                        Int128 write_mask)
{
    RISCVException ret;
    Int128 old_value;

    /* read old value */
    ret = csr_ops[csrno].read128(env, csrno, &old_value);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (int128_nz(write_mask)) {
        new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
                              int128_and(new_value, write_mask));
        if (csr_ops[csrno].write128) {
            ret = csr_ops[csrno].write128(env, csrno, new_value);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        } else if (csr_ops[csrno].write) {
            /* avoids having to write wrappers for all registers */
            ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}
4884
/* 128-bit pure CSR read, with a 64-bit fallback for registers that have
 * no read128() handler. */
RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
                               Int128 *ret_value)
{
    RISCVException ret;

    ret = riscv_csrrw_check(env, csrno, false);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (csr_ops[csrno].read128) {
        return riscv_csrrw_do128(env, csrno, ret_value,
                                 int128_zero(), int128_zero());
    }

    /*
     * Fall back to 64-bit version for now, if the 128-bit alternative isn't
     * at all defined.
     * Note, some CSRs don't need to extend to MXLEN (64 upper bits non
     * significant), for those, this fallback is correctly handling the
     * accesses
     */
    target_ulong old_value;
    ret = riscv_csrrw_do64(env, csrno, &old_value,
                           (target_ulong)0,
                           (target_ulong)0);
    /* Zero-extend the 64-bit result into the 128-bit return value */
    if (ret == RISCV_EXCP_NONE && ret_value) {
        *ret_value = int128_make64(old_value);
    }
    return ret;
}
4916
/* 128-bit CSR read-modify-write, with a 64-bit fallback that uses only
 * the low halves of new_value and write_mask. */
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value,
                                Int128 new_value, Int128 write_mask)
{
    RISCVException ret;

    ret = riscv_csrrw_check(env, csrno, true);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (csr_ops[csrno].read128) {
        return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
    }

    /*
     * Fall back to 64-bit version for now, if the 128-bit alternative isn't
     * at all defined.
     * Note, some CSRs don't need to extend to MXLEN (64 upper bits non
     * significant), for those, this fallback is correctly handling the
     * accesses
     */
    target_ulong old_value;
    ret = riscv_csrrw_do64(env, csrno, &old_value,
                           int128_getlo(new_value),
                           int128_getlo(write_mask));
    /* Zero-extend the 64-bit result into the 128-bit return value */
    if (ret == RISCV_EXCP_NONE && ret_value) {
        *ret_value = int128_make64(old_value);
    }
    return ret;
}
4948
4949 /*
4950 * Debugger support. If not in user mode, set env->debugger before the
4951 * riscv_csrrw call and clear it after the call.
4952 */
riscv_csrrw_debug(CPURISCVState * env,int csrno,target_ulong * ret_value,target_ulong new_value,target_ulong write_mask)4953 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
4954 target_ulong *ret_value,
4955 target_ulong new_value,
4956 target_ulong write_mask)
4957 {
4958 RISCVException ret;
4959 #if !defined(CONFIG_USER_ONLY)
4960 env->debugger = true;
4961 #endif
4962 if (!write_mask) {
4963 ret = riscv_csrr(env, csrno, ret_value);
4964 } else {
4965 ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
4966 }
4967 #if !defined(CONFIG_USER_ONLY)
4968 env->debugger = false;
4969 #endif
4970 return ret;
4971 }
4972
/* Zcmt jump-vector table CSR: plain accessors */
static RISCVException read_jvt(CPURISCVState *env, int csrno,
                               target_ulong *val)
{
    *val = env->jvt;
    return RISCV_EXCP_NONE;
}

static RISCVException write_jvt(CPURISCVState *env, int csrno,
                                target_ulong val)
{
    env->jvt = val;
    return RISCV_EXCP_NONE;
}
4986
4987 /*
4988 * Control and Status Register function table
4989 * riscv_csr_operations::predicate() must be provided for an implemented CSR
4990 */
4991 riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
4992 /* User Floating-Point CSRs */
4993 [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
4994 [CSR_FRM] = { "frm", fs, read_frm, write_frm },
4995 [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr },
4996 /* Vector CSRs */
4997 [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart },
4998 [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat },
4999 [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm },
5000 [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr },
5001 [CSR_VL] = { "vl", vs, read_vl },
5002 [CSR_VTYPE] = { "vtype", vs, read_vtype },
5003 [CSR_VLENB] = { "vlenb", vs, read_vlenb },
5004 /* User Timers and Counters */
5005 [CSR_CYCLE] = { "cycle", ctr, read_hpmcounter },
5006 [CSR_INSTRET] = { "instret", ctr, read_hpmcounter },
5007 [CSR_CYCLEH] = { "cycleh", ctr32, read_hpmcounterh },
5008 [CSR_INSTRETH] = { "instreth", ctr32, read_hpmcounterh },
5009
5010 /*
5011 * In privileged mode, the monitor will have to emulate TIME CSRs only if
5012 * rdtime callback is not provided by machine/platform emulation.
5013 */
5014 [CSR_TIME] = { "time", ctr, read_time },
5015 [CSR_TIMEH] = { "timeh", ctr32, read_timeh },
5016
5017 /* Crypto Extension */
5018 [CSR_SEED] = { "seed", seed, NULL, NULL, rmw_seed },
5019
5020 /* Zcmt Extension */
5021 [CSR_JVT] = {"jvt", zcmt, read_jvt, write_jvt},
5022
5023 /* zicfiss Extension, shadow stack register */
5024 [CSR_SSP] = { "ssp", cfi_ss, read_ssp, write_ssp },
5025
5026 #if !defined(CONFIG_USER_ONLY)
5027 /* Machine Timers and Counters */
5028 [CSR_MCYCLE] = { "mcycle", any, read_hpmcounter,
5029 write_mhpmcounter },
5030 [CSR_MINSTRET] = { "minstret", any, read_hpmcounter,
5031 write_mhpmcounter },
5032 [CSR_MCYCLEH] = { "mcycleh", any32, read_hpmcounterh,
5033 write_mhpmcounterh },
5034 [CSR_MINSTRETH] = { "minstreth", any32, read_hpmcounterh,
5035 write_mhpmcounterh },
5036
5037 /* Machine Information Registers */
5038 [CSR_MVENDORID] = { "mvendorid", any, read_mvendorid },
5039 [CSR_MARCHID] = { "marchid", any, read_marchid },
5040 [CSR_MIMPID] = { "mimpid", any, read_mimpid },
5041 [CSR_MHARTID] = { "mhartid", any, read_mhartid },
5042
5043 [CSR_MCONFIGPTR] = { "mconfigptr", any, read_zero,
5044 .min_priv_ver = PRIV_VERSION_1_12_0 },
5045 /* Machine Trap Setup */
5046 [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus,
5047 NULL, read_mstatus_i128 },
5048 [CSR_MISA] = { "misa", any, read_misa, write_misa,
5049 NULL, read_misa_i128 },
5050 [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg },
5051 [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg },
5052 [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie },
5053 [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec },
5054 [CSR_MCOUNTEREN] = { "mcounteren", umode, read_mcounteren,
5055 write_mcounteren },
5056
5057 [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush,
5058 write_mstatush },
5059 [CSR_MEDELEGH] = { "medelegh", any32, read_zero, write_ignore,
5060 .min_priv_ver = PRIV_VERSION_1_13_0 },
5061 [CSR_HEDELEGH] = { "hedelegh", hmode32, read_hedelegh, write_hedelegh,
5062 .min_priv_ver = PRIV_VERSION_1_13_0 },
5063
5064 /* Machine Trap Handling */
5065 [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch,
5066 NULL, read_mscratch_i128, write_mscratch_i128 },
5067 [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc },
5068 [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause },
5069 [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval },
5070 [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
5071
5072 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */
5073 [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect },
5074 [CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg },
5075
5076 /* Machine-Level Interrupts (AIA) */
5077 [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
5078 [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },
5079
5080 /* Virtual Interrupts for Supervisor Level (AIA) */
5081 [CSR_MVIEN] = { "mvien", aia_any, NULL, NULL, rmw_mvien },
5082 [CSR_MVIP] = { "mvip", aia_any, NULL, NULL, rmw_mvip },
5083
5084 /* Machine-Level High-Half CSRs (AIA) */
5085 [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh },
5086 [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh },
5087 [CSR_MVIENH] = { "mvienh", aia_any32, NULL, NULL, rmw_mvienh },
5088 [CSR_MVIPH] = { "mviph", aia_any32, NULL, NULL, rmw_mviph },
5089 [CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph },
5090
5091 /* Execution environment configuration */
5092 [CSR_MENVCFG] = { "menvcfg", umode, read_menvcfg, write_menvcfg,
5093 .min_priv_ver = PRIV_VERSION_1_12_0 },
5094 [CSR_MENVCFGH] = { "menvcfgh", umode32, read_menvcfgh, write_menvcfgh,
5095 .min_priv_ver = PRIV_VERSION_1_12_0 },
5096 [CSR_SENVCFG] = { "senvcfg", smode, read_senvcfg, write_senvcfg,
5097 .min_priv_ver = PRIV_VERSION_1_12_0 },
5098 [CSR_HENVCFG] = { "henvcfg", hmode, read_henvcfg, write_henvcfg,
5099 .min_priv_ver = PRIV_VERSION_1_12_0 },
5100 [CSR_HENVCFGH] = { "henvcfgh", hmode32, read_henvcfgh, write_henvcfgh,
5101 .min_priv_ver = PRIV_VERSION_1_12_0 },
5102
5103 /* Smstateen extension CSRs */
5104 [CSR_MSTATEEN0] = { "mstateen0", mstateen, read_mstateen, write_mstateen0,
5105 .min_priv_ver = PRIV_VERSION_1_12_0 },
5106 [CSR_MSTATEEN0H] = { "mstateen0h", mstateen, read_mstateenh,
5107 write_mstateen0h,
5108 .min_priv_ver = PRIV_VERSION_1_12_0 },
5109 [CSR_MSTATEEN1] = { "mstateen1", mstateen, read_mstateen,
5110 write_mstateen_1_3,
5111 .min_priv_ver = PRIV_VERSION_1_12_0 },
5112 [CSR_MSTATEEN1H] = { "mstateen1h", mstateen, read_mstateenh,
5113 write_mstateenh_1_3,
5114 .min_priv_ver = PRIV_VERSION_1_12_0 },
5115 [CSR_MSTATEEN2] = { "mstateen2", mstateen, read_mstateen,
5116 write_mstateen_1_3,
5117 .min_priv_ver = PRIV_VERSION_1_12_0 },
5118 [CSR_MSTATEEN2H] = { "mstateen2h", mstateen, read_mstateenh,
5119 write_mstateenh_1_3,
5120 .min_priv_ver = PRIV_VERSION_1_12_0 },
5121 [CSR_MSTATEEN3] = { "mstateen3", mstateen, read_mstateen,
5122 write_mstateen_1_3,
5123 .min_priv_ver = PRIV_VERSION_1_12_0 },
5124 [CSR_MSTATEEN3H] = { "mstateen3h", mstateen, read_mstateenh,
5125 write_mstateenh_1_3,
5126 .min_priv_ver = PRIV_VERSION_1_12_0 },
5127 [CSR_HSTATEEN0] = { "hstateen0", hstateen, read_hstateen, write_hstateen0,
5128 .min_priv_ver = PRIV_VERSION_1_12_0 },
5129 [CSR_HSTATEEN0H] = { "hstateen0h", hstateenh, read_hstateenh,
5130 write_hstateen0h,
5131 .min_priv_ver = PRIV_VERSION_1_12_0 },
5132 [CSR_HSTATEEN1] = { "hstateen1", hstateen, read_hstateen,
5133 write_hstateen_1_3,
5134 .min_priv_ver = PRIV_VERSION_1_12_0 },
5135 [CSR_HSTATEEN1H] = { "hstateen1h", hstateenh, read_hstateenh,
5136 write_hstateenh_1_3,
5137 .min_priv_ver = PRIV_VERSION_1_12_0 },
5138 [CSR_HSTATEEN2] = { "hstateen2", hstateen, read_hstateen,
5139 write_hstateen_1_3,
5140 .min_priv_ver = PRIV_VERSION_1_12_0 },
5141 [CSR_HSTATEEN2H] = { "hstateen2h", hstateenh, read_hstateenh,
5142 write_hstateenh_1_3,
5143 .min_priv_ver = PRIV_VERSION_1_12_0 },
5144 [CSR_HSTATEEN3] = { "hstateen3", hstateen, read_hstateen,
5145 write_hstateen_1_3,
5146 .min_priv_ver = PRIV_VERSION_1_12_0 },
5147 [CSR_HSTATEEN3H] = { "hstateen3h", hstateenh, read_hstateenh,
5148 write_hstateenh_1_3,
5149 .min_priv_ver = PRIV_VERSION_1_12_0 },
5150 [CSR_SSTATEEN0] = { "sstateen0", sstateen, read_sstateen, write_sstateen0,
5151 .min_priv_ver = PRIV_VERSION_1_12_0 },
5152 [CSR_SSTATEEN1] = { "sstateen1", sstateen, read_sstateen,
5153 write_sstateen_1_3,
5154 .min_priv_ver = PRIV_VERSION_1_12_0 },
5155 [CSR_SSTATEEN2] = { "sstateen2", sstateen, read_sstateen,
5156 write_sstateen_1_3,
5157 .min_priv_ver = PRIV_VERSION_1_12_0 },
5158 [CSR_SSTATEEN3] = { "sstateen3", sstateen, read_sstateen,
5159 write_sstateen_1_3,
5160 .min_priv_ver = PRIV_VERSION_1_12_0 },
5161
5162 /* Supervisor Trap Setup */
5163 [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus,
5164 NULL, read_sstatus_i128 },
5165 [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie },
5166 [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec },
5167 [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren,
5168 write_scounteren },
5169
5170 /* Supervisor Trap Handling */
5171 [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch,
5172 NULL, read_sscratch_i128, write_sscratch_i128 },
5173 [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc },
5174 [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause },
5175 [CSR_STVAL] = { "stval", smode, read_stval, write_stval },
5176 [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip },
5177 [CSR_STIMECMP] = { "stimecmp", sstc, read_stimecmp, write_stimecmp,
5178 .min_priv_ver = PRIV_VERSION_1_12_0 },
5179 [CSR_STIMECMPH] = { "stimecmph", sstc_32, read_stimecmph, write_stimecmph,
5180 .min_priv_ver = PRIV_VERSION_1_12_0 },
5181 [CSR_VSTIMECMP] = { "vstimecmp", sstc, read_vstimecmp,
5182 write_vstimecmp,
5183 .min_priv_ver = PRIV_VERSION_1_12_0 },
5184 [CSR_VSTIMECMPH] = { "vstimecmph", sstc_32, read_vstimecmph,
5185 write_vstimecmph,
5186 .min_priv_ver = PRIV_VERSION_1_12_0 },
5187
5188 /* Supervisor Protection and Translation */
5189 [CSR_SATP] = { "satp", satp, read_satp, write_satp },
5190
5191 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
5192 [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect },
5193 [CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg },
5194
5195 /* Supervisor-Level Interrupts (AIA) */
5196 [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
5197 [CSR_STOPI] = { "stopi", aia_smode, read_stopi },
5198
5199 /* Supervisor-Level High-Half CSRs (AIA) */
5200 [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh },
5201 [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph },
5202
5203 [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus,
5204 .min_priv_ver = PRIV_VERSION_1_12_0 },
5205 [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg,
5206 .min_priv_ver = PRIV_VERSION_1_12_0 },
5207 [CSR_HIDELEG] = { "hideleg", hmode, NULL, NULL, rmw_hideleg,
5208 .min_priv_ver = PRIV_VERSION_1_12_0 },
5209 [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip,
5210 .min_priv_ver = PRIV_VERSION_1_12_0 },
5211 [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip,
5212 .min_priv_ver = PRIV_VERSION_1_12_0 },
5213 [CSR_HIE] = { "hie", hmode, NULL, NULL, rmw_hie,
5214 .min_priv_ver = PRIV_VERSION_1_12_0 },
5215 [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren,
5216 write_hcounteren,
5217 .min_priv_ver = PRIV_VERSION_1_12_0 },
5218 [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie,
5219 .min_priv_ver = PRIV_VERSION_1_12_0 },
5220 [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval,
5221 .min_priv_ver = PRIV_VERSION_1_12_0 },
5222 [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst,
5223 .min_priv_ver = PRIV_VERSION_1_12_0 },
5224 [CSR_HGEIP] = { "hgeip", hmode, read_hgeip,
5225 .min_priv_ver = PRIV_VERSION_1_12_0 },
5226 [CSR_HGATP] = { "hgatp", hgatp, read_hgatp, write_hgatp,
5227 .min_priv_ver = PRIV_VERSION_1_12_0 },
5228 [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta,
5229 write_htimedelta,
5230 .min_priv_ver = PRIV_VERSION_1_12_0 },
5231 [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah,
5232 write_htimedeltah,
5233 .min_priv_ver = PRIV_VERSION_1_12_0 },
5234
5235 [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus,
5236 write_vsstatus,
5237 .min_priv_ver = PRIV_VERSION_1_12_0 },
5238 [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip,
5239 .min_priv_ver = PRIV_VERSION_1_12_0 },
5240 [CSR_VSIE] = { "vsie", hmode, NULL, NULL, rmw_vsie ,
5241 .min_priv_ver = PRIV_VERSION_1_12_0 },
5242 [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec,
5243 .min_priv_ver = PRIV_VERSION_1_12_0 },
5244 [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch,
5245 write_vsscratch,
5246 .min_priv_ver = PRIV_VERSION_1_12_0 },
5247 [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc,
5248 .min_priv_ver = PRIV_VERSION_1_12_0 },
5249 [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause,
5250 .min_priv_ver = PRIV_VERSION_1_12_0 },
5251 [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval,
5252 .min_priv_ver = PRIV_VERSION_1_12_0 },
5253 [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp,
5254 .min_priv_ver = PRIV_VERSION_1_12_0 },
5255
5256 [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2,
5257 .min_priv_ver = PRIV_VERSION_1_12_0 },
5258 [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst,
5259 .min_priv_ver = PRIV_VERSION_1_12_0 },
5260
5261 /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
5262 [CSR_HVIEN] = { "hvien", aia_hmode, NULL, NULL, rmw_hvien },
5263 [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl,
5264 write_hvictl },
5265 [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1,
5266 write_hviprio1 },
5267 [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2,
5268 write_hviprio2 },
5269 /*
5270 * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
5271 */
5272 [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL,
5273 rmw_xiselect },
5274 [CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg },
5275
5276 /* VS-Level Interrupts (H-extension with AIA) */
5277 [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
5278 [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi },
5279
5280 /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
5281 [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL,
5282 rmw_hidelegh },
5283 [CSR_HVIENH] = { "hvienh", aia_hmode32, NULL, NULL, rmw_hvienh },
5284 [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph },
5285 [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h,
5286 write_hviprio1h },
5287 [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h,
5288 write_hviprio2h },
5289 [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh },
5290 [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph },
5291
5292 /* Physical Memory Protection */
5293 [CSR_MSECCFG] = { "mseccfg", have_mseccfg, read_mseccfg, write_mseccfg,
5294 .min_priv_ver = PRIV_VERSION_1_11_0 },
5295 [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg },
5296 [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg },
5297 [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg },
5298 [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg },
5299 [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr },
5300 [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr },
5301 [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr },
5302 [CSR_PMPADDR3] = { "pmpaddr3", pmp, read_pmpaddr, write_pmpaddr },
5303 [CSR_PMPADDR4] = { "pmpaddr4", pmp, read_pmpaddr, write_pmpaddr },
5304 [CSR_PMPADDR5] = { "pmpaddr5", pmp, read_pmpaddr, write_pmpaddr },
5305 [CSR_PMPADDR6] = { "pmpaddr6", pmp, read_pmpaddr, write_pmpaddr },
5306 [CSR_PMPADDR7] = { "pmpaddr7", pmp, read_pmpaddr, write_pmpaddr },
5307 [CSR_PMPADDR8] = { "pmpaddr8", pmp, read_pmpaddr, write_pmpaddr },
5308 [CSR_PMPADDR9] = { "pmpaddr9", pmp, read_pmpaddr, write_pmpaddr },
5309 [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr },
5310 [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
5311 [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
5312 [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
5313 [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
5314 [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
5315
5316 /* Debug CSRs */
5317 [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect },
5318 [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata },
5319 [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata },
5320 [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata },
5321 [CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },
5322 [CSR_MCONTEXT] = { "mcontext", debug, read_mcontext, write_mcontext },
5323
5324 /* User Pointer Masking */
5325 [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte },
5326 [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask,
5327 write_upmmask },
5328 [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase,
5329 write_upmbase },
5330 /* Machine Pointer Masking */
5331 [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte },
5332 [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask,
5333 write_mpmmask },
5334 [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase,
5335 write_mpmbase },
5336 /* Supervisor Pointer Masking */
5337 [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte },
5338 [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask,
5339 write_spmmask },
5340 [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase,
5341 write_spmbase },
5342
5343 /* Performance Counters */
5344 [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
5345 [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter },
5346 [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_hpmcounter },
5347 [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_hpmcounter },
5348 [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_hpmcounter },
5349 [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_hpmcounter },
5350 [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_hpmcounter },
5351 [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_hpmcounter },
5352 [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_hpmcounter },
5353 [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_hpmcounter },
5354 [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_hpmcounter },
5355 [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_hpmcounter },
5356 [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_hpmcounter },
5357 [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_hpmcounter },
5358 [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_hpmcounter },
5359 [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_hpmcounter },
5360 [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_hpmcounter },
5361 [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_hpmcounter },
5362 [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_hpmcounter },
5363 [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_hpmcounter },
5364 [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_hpmcounter },
5365 [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_hpmcounter },
5366 [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_hpmcounter },
5367 [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_hpmcounter },
5368 [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_hpmcounter },
5369 [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_hpmcounter },
5370 [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_hpmcounter },
5371 [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_hpmcounter },
5372 [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_hpmcounter },
5373
5374 [CSR_MHPMCOUNTER3] = { "mhpmcounter3", mctr, read_hpmcounter,
5375 write_mhpmcounter },
5376 [CSR_MHPMCOUNTER4] = { "mhpmcounter4", mctr, read_hpmcounter,
5377 write_mhpmcounter },
5378 [CSR_MHPMCOUNTER5] = { "mhpmcounter5", mctr, read_hpmcounter,
5379 write_mhpmcounter },
5380 [CSR_MHPMCOUNTER6] = { "mhpmcounter6", mctr, read_hpmcounter,
5381 write_mhpmcounter },
5382 [CSR_MHPMCOUNTER7] = { "mhpmcounter7", mctr, read_hpmcounter,
5383 write_mhpmcounter },
5384 [CSR_MHPMCOUNTER8] = { "mhpmcounter8", mctr, read_hpmcounter,
5385 write_mhpmcounter },
5386 [CSR_MHPMCOUNTER9] = { "mhpmcounter9", mctr, read_hpmcounter,
5387 write_mhpmcounter },
5388 [CSR_MHPMCOUNTER10] = { "mhpmcounter10", mctr, read_hpmcounter,
5389 write_mhpmcounter },
5390 [CSR_MHPMCOUNTER11] = { "mhpmcounter11", mctr, read_hpmcounter,
5391 write_mhpmcounter },
5392 [CSR_MHPMCOUNTER12] = { "mhpmcounter12", mctr, read_hpmcounter,
5393 write_mhpmcounter },
5394 [CSR_MHPMCOUNTER13] = { "mhpmcounter13", mctr, read_hpmcounter,
5395 write_mhpmcounter },
5396 [CSR_MHPMCOUNTER14] = { "mhpmcounter14", mctr, read_hpmcounter,
5397 write_mhpmcounter },
5398 [CSR_MHPMCOUNTER15] = { "mhpmcounter15", mctr, read_hpmcounter,
5399 write_mhpmcounter },
5400 [CSR_MHPMCOUNTER16] = { "mhpmcounter16", mctr, read_hpmcounter,
5401 write_mhpmcounter },
5402 [CSR_MHPMCOUNTER17] = { "mhpmcounter17", mctr, read_hpmcounter,
5403 write_mhpmcounter },
5404 [CSR_MHPMCOUNTER18] = { "mhpmcounter18", mctr, read_hpmcounter,
5405 write_mhpmcounter },
5406 [CSR_MHPMCOUNTER19] = { "mhpmcounter19", mctr, read_hpmcounter,
5407 write_mhpmcounter },
5408 [CSR_MHPMCOUNTER20] = { "mhpmcounter20", mctr, read_hpmcounter,
5409 write_mhpmcounter },
5410 [CSR_MHPMCOUNTER21] = { "mhpmcounter21", mctr, read_hpmcounter,
5411 write_mhpmcounter },
5412 [CSR_MHPMCOUNTER22] = { "mhpmcounter22", mctr, read_hpmcounter,
5413 write_mhpmcounter },
5414 [CSR_MHPMCOUNTER23] = { "mhpmcounter23", mctr, read_hpmcounter,
5415 write_mhpmcounter },
5416 [CSR_MHPMCOUNTER24] = { "mhpmcounter24", mctr, read_hpmcounter,
5417 write_mhpmcounter },
5418 [CSR_MHPMCOUNTER25] = { "mhpmcounter25", mctr, read_hpmcounter,
5419 write_mhpmcounter },
5420 [CSR_MHPMCOUNTER26] = { "mhpmcounter26", mctr, read_hpmcounter,
5421 write_mhpmcounter },
5422 [CSR_MHPMCOUNTER27] = { "mhpmcounter27", mctr, read_hpmcounter,
5423 write_mhpmcounter },
5424 [CSR_MHPMCOUNTER28] = { "mhpmcounter28", mctr, read_hpmcounter,
5425 write_mhpmcounter },
5426 [CSR_MHPMCOUNTER29] = { "mhpmcounter29", mctr, read_hpmcounter,
5427 write_mhpmcounter },
5428 [CSR_MHPMCOUNTER30] = { "mhpmcounter30", mctr, read_hpmcounter,
5429 write_mhpmcounter },
5430 [CSR_MHPMCOUNTER31] = { "mhpmcounter31", mctr, read_hpmcounter,
5431 write_mhpmcounter },
5432
5433 [CSR_MCOUNTINHIBIT] = { "mcountinhibit", any, read_mcountinhibit,
5434 write_mcountinhibit,
5435 .min_priv_ver = PRIV_VERSION_1_11_0 },
5436
5437 [CSR_MCYCLECFG] = { "mcyclecfg", smcntrpmf, read_mcyclecfg,
5438 write_mcyclecfg,
5439 .min_priv_ver = PRIV_VERSION_1_12_0 },
5440 [CSR_MINSTRETCFG] = { "minstretcfg", smcntrpmf, read_minstretcfg,
5441 write_minstretcfg,
5442 .min_priv_ver = PRIV_VERSION_1_12_0 },
5443
5444 [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent,
5445 write_mhpmevent },
5446 [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent,
5447 write_mhpmevent },
5448 [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_mhpmevent,
5449 write_mhpmevent },
5450 [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_mhpmevent,
5451 write_mhpmevent },
5452 [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_mhpmevent,
5453 write_mhpmevent },
5454 [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_mhpmevent,
5455 write_mhpmevent },
5456 [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_mhpmevent,
5457 write_mhpmevent },
5458 [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_mhpmevent,
5459 write_mhpmevent },
5460 [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_mhpmevent,
5461 write_mhpmevent },
5462 [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_mhpmevent,
5463 write_mhpmevent },
5464 [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_mhpmevent,
5465 write_mhpmevent },
5466 [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_mhpmevent,
5467 write_mhpmevent },
5468 [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_mhpmevent,
5469 write_mhpmevent },
5470 [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_mhpmevent,
5471 write_mhpmevent },
5472 [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_mhpmevent,
5473 write_mhpmevent },
5474 [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_mhpmevent,
5475 write_mhpmevent },
5476 [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_mhpmevent,
5477 write_mhpmevent },
5478 [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_mhpmevent,
5479 write_mhpmevent },
5480 [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_mhpmevent,
5481 write_mhpmevent },
5482 [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_mhpmevent,
5483 write_mhpmevent },
5484 [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_mhpmevent,
5485 write_mhpmevent },
5486 [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_mhpmevent,
5487 write_mhpmevent },
5488 [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_mhpmevent,
5489 write_mhpmevent },
5490 [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_mhpmevent,
5491 write_mhpmevent },
5492 [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_mhpmevent,
5493 write_mhpmevent },
5494 [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_mhpmevent,
5495 write_mhpmevent },
5496 [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_mhpmevent,
5497 write_mhpmevent },
5498 [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_mhpmevent,
5499 write_mhpmevent },
5500 [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent,
5501 write_mhpmevent },
5502
5503 [CSR_MCYCLECFGH] = { "mcyclecfgh", smcntrpmf_32, read_mcyclecfgh,
5504 write_mcyclecfgh,
5505 .min_priv_ver = PRIV_VERSION_1_12_0 },
5506 [CSR_MINSTRETCFGH] = { "minstretcfgh", smcntrpmf_32, read_minstretcfgh,
5507 write_minstretcfgh,
5508 .min_priv_ver = PRIV_VERSION_1_12_0 },
5509
5510 [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf_32, read_mhpmeventh,
5511 write_mhpmeventh,
5512 .min_priv_ver = PRIV_VERSION_1_12_0 },
5513 [CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf_32, read_mhpmeventh,
5514 write_mhpmeventh,
5515 .min_priv_ver = PRIV_VERSION_1_12_0 },
5516 [CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf_32, read_mhpmeventh,
5517 write_mhpmeventh,
5518 .min_priv_ver = PRIV_VERSION_1_12_0 },
5519 [CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf_32, read_mhpmeventh,
5520 write_mhpmeventh,
5521 .min_priv_ver = PRIV_VERSION_1_12_0 },
5522 [CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf_32, read_mhpmeventh,
5523 write_mhpmeventh,
5524 .min_priv_ver = PRIV_VERSION_1_12_0 },
5525 [CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf_32, read_mhpmeventh,
5526 write_mhpmeventh,
5527 .min_priv_ver = PRIV_VERSION_1_12_0 },
5528 [CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf_32, read_mhpmeventh,
5529 write_mhpmeventh,
5530 .min_priv_ver = PRIV_VERSION_1_12_0 },
5531 [CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf_32, read_mhpmeventh,
5532 write_mhpmeventh,
5533 .min_priv_ver = PRIV_VERSION_1_12_0 },
5534 [CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf_32, read_mhpmeventh,
5535 write_mhpmeventh,
5536 .min_priv_ver = PRIV_VERSION_1_12_0 },
5537 [CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf_32, read_mhpmeventh,
5538 write_mhpmeventh,
5539 .min_priv_ver = PRIV_VERSION_1_12_0 },
5540 [CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf_32, read_mhpmeventh,
5541 write_mhpmeventh,
5542 .min_priv_ver = PRIV_VERSION_1_12_0 },
5543 [CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf_32, read_mhpmeventh,
5544 write_mhpmeventh,
5545 .min_priv_ver = PRIV_VERSION_1_12_0 },
5546 [CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf_32, read_mhpmeventh,
5547 write_mhpmeventh,
5548 .min_priv_ver = PRIV_VERSION_1_12_0 },
5549 [CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf_32, read_mhpmeventh,
5550 write_mhpmeventh,
5551 .min_priv_ver = PRIV_VERSION_1_12_0 },
5552 [CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf_32, read_mhpmeventh,
5553 write_mhpmeventh,
5554 .min_priv_ver = PRIV_VERSION_1_12_0 },
5555 [CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf_32, read_mhpmeventh,
5556 write_mhpmeventh,
5557 .min_priv_ver = PRIV_VERSION_1_12_0 },
5558 [CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf_32, read_mhpmeventh,
5559 write_mhpmeventh,
5560 .min_priv_ver = PRIV_VERSION_1_12_0 },
5561 [CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf_32, read_mhpmeventh,
5562 write_mhpmeventh,
5563 .min_priv_ver = PRIV_VERSION_1_12_0 },
5564 [CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf_32, read_mhpmeventh,
5565 write_mhpmeventh,
5566 .min_priv_ver = PRIV_VERSION_1_12_0 },
5567 [CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf_32, read_mhpmeventh,
5568 write_mhpmeventh,
5569 .min_priv_ver = PRIV_VERSION_1_12_0 },
5570 [CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf_32, read_mhpmeventh,
5571 write_mhpmeventh,
5572 .min_priv_ver = PRIV_VERSION_1_12_0 },
5573 [CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf_32, read_mhpmeventh,
5574 write_mhpmeventh,
5575 .min_priv_ver = PRIV_VERSION_1_12_0 },
5576 [CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf_32, read_mhpmeventh,
5577 write_mhpmeventh,
5578 .min_priv_ver = PRIV_VERSION_1_12_0 },
5579 [CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf_32, read_mhpmeventh,
5580 write_mhpmeventh,
5581 .min_priv_ver = PRIV_VERSION_1_12_0 },
5582 [CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf_32, read_mhpmeventh,
5583 write_mhpmeventh,
5584 .min_priv_ver = PRIV_VERSION_1_12_0 },
5585 [CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf_32, read_mhpmeventh,
5586 write_mhpmeventh,
5587 .min_priv_ver = PRIV_VERSION_1_12_0 },
5588 [CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf_32, read_mhpmeventh,
5589 write_mhpmeventh,
5590 .min_priv_ver = PRIV_VERSION_1_12_0 },
5591 [CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf_32, read_mhpmeventh,
5592 write_mhpmeventh,
5593 .min_priv_ver = PRIV_VERSION_1_12_0 },
5594 [CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf_32, read_mhpmeventh,
5595 write_mhpmeventh,
5596 .min_priv_ver = PRIV_VERSION_1_12_0 },
5597
5598 [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_hpmcounterh },
5599 [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_hpmcounterh },
5600 [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_hpmcounterh },
5601 [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_hpmcounterh },
5602 [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_hpmcounterh },
5603 [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_hpmcounterh },
5604 [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_hpmcounterh },
5605 [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_hpmcounterh },
5606 [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_hpmcounterh },
5607 [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_hpmcounterh },
5608 [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_hpmcounterh },
5609 [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_hpmcounterh },
5610 [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_hpmcounterh },
5611 [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_hpmcounterh },
5612 [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_hpmcounterh },
5613 [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_hpmcounterh },
5614 [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_hpmcounterh },
5615 [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_hpmcounterh },
5616 [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_hpmcounterh },
5617 [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_hpmcounterh },
5618 [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_hpmcounterh },
5619 [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_hpmcounterh },
5620 [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_hpmcounterh },
5621 [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_hpmcounterh },
5622 [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_hpmcounterh },
5623 [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_hpmcounterh },
5624 [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_hpmcounterh },
5625 [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_hpmcounterh },
5626 [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_hpmcounterh },
5627
5628 [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", mctr32, read_hpmcounterh,
5629 write_mhpmcounterh },
5630 [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", mctr32, read_hpmcounterh,
5631 write_mhpmcounterh },
5632 [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", mctr32, read_hpmcounterh,
5633 write_mhpmcounterh },
5634 [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", mctr32, read_hpmcounterh,
5635 write_mhpmcounterh },
5636 [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", mctr32, read_hpmcounterh,
5637 write_mhpmcounterh },
5638 [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", mctr32, read_hpmcounterh,
5639 write_mhpmcounterh },
5640 [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", mctr32, read_hpmcounterh,
5641 write_mhpmcounterh },
5642 [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", mctr32, read_hpmcounterh,
5643 write_mhpmcounterh },
5644 [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", mctr32, read_hpmcounterh,
5645 write_mhpmcounterh },
5646 [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", mctr32, read_hpmcounterh,
5647 write_mhpmcounterh },
5648 [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", mctr32, read_hpmcounterh,
5649 write_mhpmcounterh },
5650 [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", mctr32, read_hpmcounterh,
5651 write_mhpmcounterh },
5652 [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", mctr32, read_hpmcounterh,
5653 write_mhpmcounterh },
5654 [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", mctr32, read_hpmcounterh,
5655 write_mhpmcounterh },
5656 [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", mctr32, read_hpmcounterh,
5657 write_mhpmcounterh },
5658 [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", mctr32, read_hpmcounterh,
5659 write_mhpmcounterh },
5660 [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", mctr32, read_hpmcounterh,
5661 write_mhpmcounterh },
5662 [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", mctr32, read_hpmcounterh,
5663 write_mhpmcounterh },
5664 [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", mctr32, read_hpmcounterh,
5665 write_mhpmcounterh },
5666 [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", mctr32, read_hpmcounterh,
5667 write_mhpmcounterh },
5668 [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", mctr32, read_hpmcounterh,
5669 write_mhpmcounterh },
5670 [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", mctr32, read_hpmcounterh,
5671 write_mhpmcounterh },
5672 [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", mctr32, read_hpmcounterh,
5673 write_mhpmcounterh },
5674 [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", mctr32, read_hpmcounterh,
5675 write_mhpmcounterh },
5676 [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", mctr32, read_hpmcounterh,
5677 write_mhpmcounterh },
5678 [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", mctr32, read_hpmcounterh,
5679 write_mhpmcounterh },
5680 [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", mctr32, read_hpmcounterh,
5681 write_mhpmcounterh },
5682 [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", mctr32, read_hpmcounterh,
5683 write_mhpmcounterh },
5684 [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", mctr32, read_hpmcounterh,
5685 write_mhpmcounterh },
5686 [CSR_SCOUNTOVF] = { "scountovf", sscofpmf, read_scountovf,
5687 .min_priv_ver = PRIV_VERSION_1_12_0 },
5688
5689 #endif /* !CONFIG_USER_ONLY */
5690 };
5691