1 /*
2 * RISC-V Control and Status Registers.
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/timer.h"
23 #include "cpu.h"
24 #include "tcg/tcg-cpu.h"
25 #include "pmu.h"
26 #include "time_helper.h"
27 #include "exec/cputlb.h"
28 #include "exec/tb-flush.h"
29 #include "exec/icount.h"
30 #include "accel/tcg/getpc.h"
31 #include "qemu/guest-random.h"
32 #include "qapi/error.h"
33 #include "tcg/insn-start-words.h"
34 #include "internals.h"
35 #include <stdbool.h>
36
37 /* CSR function table public API */
/* Copy the operation table entry for @csrno into @ops. */
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
{
    int idx = csrno & (CSR_TABLE_SIZE - 1);

    *ops = csr_ops[idx];
}
42
riscv_set_csr_ops(int csrno,const riscv_csr_operations * ops)43 void riscv_set_csr_ops(int csrno, const riscv_csr_operations *ops)
44 {
45 csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
46 }
47
48 /* Predicates */
49 #if !defined(CONFIG_USER_ONLY)
/*
 * Check whether access to a facility gated by Smstateen bit @bit of
 * state-enable register @index is permitted from the current
 * privilege/virtualization mode.
 *
 * Returns RISCV_EXCP_NONE when access is allowed, otherwise the
 * exception to raise (illegal instruction, or virtual instruction
 * fault when the denial comes from the hypervisor's hstateen).
 */
RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit)
{
    bool virt = env->virt_enabled;

    /* M-mode is never gated; nor is anything if Smstateen is absent. */
    if (env->priv == PRV_M || !riscv_cpu_cfg(env)->ext_smstateen) {
        return RISCV_EXCP_NONE;
    }

    /* mstateen gates every privilege level below M. */
    if (!(env->mstateen[index] & bit)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (virt) {
        /* In VS/VU mode, hstateen denials are virtual-instruction faults. */
        if (!(env->hstateen[index] & bit)) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }

        /* VU-mode is additionally gated by the guest's sstateen. */
        if (env->priv == PRV_U && !(env->sstateen[index] & bit)) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    /* U-mode (when S-mode exists) is gated by sstateen. */
    if (env->priv == PRV_U && riscv_has_ext(env, RVS)) {
        if (!(env->sstateen[index] & bit)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }

    return RISCV_EXCP_NONE;
}
80 #endif
81
/* Predicate for the floating-point CSRs (fflags/frm/fcsr). */
static RISCVException fs(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
    /* With Zfinx the FP CSRs remain accessible even when FS is Off. */
    if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
        !riscv_cpu_cfg(env)->ext_zfinx) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* The Zfinx-only path is additionally gated by the FCSR stateen bit. */
    if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
        return smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR);
    }
#endif
    return RISCV_EXCP_NONE;
}
96
vs(CPURISCVState * env,int csrno)97 static RISCVException vs(CPURISCVState *env, int csrno)
98 {
99 if (riscv_cpu_cfg(env)->ext_zve32x) {
100 #if !defined(CONFIG_USER_ONLY)
101 if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
102 return RISCV_EXCP_ILLEGAL_INST;
103 }
104 #endif
105 return RISCV_EXCP_NONE;
106 }
107 return RISCV_EXCP_ILLEGAL_INST;
108 }
109
ctr(CPURISCVState * env,int csrno)110 static RISCVException ctr(CPURISCVState *env, int csrno)
111 {
112 #if !defined(CONFIG_USER_ONLY)
113 RISCVCPU *cpu = env_archcpu(env);
114 int ctr_index;
115 target_ulong ctr_mask;
116 int base_csrno = CSR_CYCLE;
117 bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;
118
119 if (rv32 && csrno >= CSR_CYCLEH) {
120 /* Offset for RV32 hpmcounternh counters */
121 base_csrno += 0x80;
122 }
123 ctr_index = csrno - base_csrno;
124 ctr_mask = BIT(ctr_index);
125
126 if ((csrno >= CSR_CYCLE && csrno <= CSR_INSTRET) ||
127 (csrno >= CSR_CYCLEH && csrno <= CSR_INSTRETH)) {
128 if (!riscv_cpu_cfg(env)->ext_zicntr) {
129 return RISCV_EXCP_ILLEGAL_INST;
130 }
131
132 goto skip_ext_pmu_check;
133 }
134
135 if (!(cpu->pmu_avail_ctrs & ctr_mask)) {
136 /* No counter is enabled in PMU or the counter is out of range */
137 return RISCV_EXCP_ILLEGAL_INST;
138 }
139
140 skip_ext_pmu_check:
141
142 if (env->debugger) {
143 return RISCV_EXCP_NONE;
144 }
145
146 if (env->priv < PRV_M && !get_field(env->mcounteren, ctr_mask)) {
147 return RISCV_EXCP_ILLEGAL_INST;
148 }
149
150 if (env->virt_enabled) {
151 if (!get_field(env->hcounteren, ctr_mask) ||
152 (env->priv == PRV_U && !get_field(env->scounteren, ctr_mask))) {
153 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
154 }
155 }
156
157 if (riscv_has_ext(env, RVS) && env->priv == PRV_U &&
158 !get_field(env->scounteren, ctr_mask)) {
159 return RISCV_EXCP_ILLEGAL_INST;
160 }
161
162 #endif
163 return RISCV_EXCP_NONE;
164 }
165
ctr32(CPURISCVState * env,int csrno)166 static RISCVException ctr32(CPURISCVState *env, int csrno)
167 {
168 if (riscv_cpu_mxl(env) != MXL_RV32) {
169 return RISCV_EXCP_ILLEGAL_INST;
170 }
171
172 return ctr(env, csrno);
173 }
174
zcmt(CPURISCVState * env,int csrno)175 static RISCVException zcmt(CPURISCVState *env, int csrno)
176 {
177 if (!riscv_cpu_cfg(env)->ext_zcmt) {
178 return RISCV_EXCP_ILLEGAL_INST;
179 }
180
181 #if !defined(CONFIG_USER_ONLY)
182 RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_JVT);
183 if (ret != RISCV_EXCP_NONE) {
184 return ret;
185 }
186 #endif
187
188 return RISCV_EXCP_NONE;
189 }
190
/* Predicate for the Zicfiss shadow-stack pointer CSR (ssp). */
static RISCVException cfi_ss(CPURISCVState *env, int csrno)
{
    if (!env_archcpu(env)->cfg.ext_zicfiss) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* If ext implemented, M-mode always have access to SSP CSR */
    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    }

    /* if bcfi not active for current env, access to csr is illegal */
    if (!cpu_get_bcfien(env)) {
#if !defined(CONFIG_USER_ONLY)
        /* The debugger may access ssp regardless; guests fault virtually. */
        if (env->debugger) {
            return RISCV_EXCP_NONE;
        } else if (env->virt_enabled) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
#endif
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}
216
217 #if !defined(CONFIG_USER_ONLY)
/*
 * Predicate for the machine-mode hpmcounter CSRs (mhpmcounter3..31 and
 * their RV32 high halves): the counter must be implemented in the PMU.
 */
static RISCVException mctr(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);
    uint32_t pmu_avail_ctrs = cpu->pmu_avail_ctrs;
    int ctr_index;
    int base_csrno = CSR_MHPMCOUNTER3;

    if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) {
        /* Offset for RV32 mhpmcounternh counters */
        csrno -= 0x80;
    }

    g_assert(csrno >= CSR_MHPMCOUNTER3 && csrno <= CSR_MHPMCOUNTER31);

    ctr_index = csrno - base_csrno;
    /*
     * pmu_avail_ctrs appears to be indexed by absolute counter number
     * (mhpmcounter3 == bit 3), hence the >> 3 before testing bit
     * ctr_index, which is relative to CSR_MHPMCOUNTER3.
     */
    if ((BIT(ctr_index) & pmu_avail_ctrs >> 3) == 0) {
        /* The PMU is not enabled or counter is out of range */
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}
240
mctr32(CPURISCVState * env,int csrno)241 static RISCVException mctr32(CPURISCVState *env, int csrno)
242 {
243 if (riscv_cpu_mxl(env) != MXL_RV32) {
244 return RISCV_EXCP_ILLEGAL_INST;
245 }
246
247 return mctr(env, csrno);
248 }
249
sscofpmf(CPURISCVState * env,int csrno)250 static RISCVException sscofpmf(CPURISCVState *env, int csrno)
251 {
252 if (!riscv_cpu_cfg(env)->ext_sscofpmf) {
253 return RISCV_EXCP_ILLEGAL_INST;
254 }
255
256 return RISCV_EXCP_NONE;
257 }
258
sscofpmf_32(CPURISCVState * env,int csrno)259 static RISCVException sscofpmf_32(CPURISCVState *env, int csrno)
260 {
261 if (riscv_cpu_mxl(env) != MXL_RV32) {
262 return RISCV_EXCP_ILLEGAL_INST;
263 }
264
265 return sscofpmf(env, csrno);
266 }
267
smcntrpmf(CPURISCVState * env,int csrno)268 static RISCVException smcntrpmf(CPURISCVState *env, int csrno)
269 {
270 if (!riscv_cpu_cfg(env)->ext_smcntrpmf) {
271 return RISCV_EXCP_ILLEGAL_INST;
272 }
273
274 return RISCV_EXCP_NONE;
275 }
276
smcntrpmf_32(CPURISCVState * env,int csrno)277 static RISCVException smcntrpmf_32(CPURISCVState *env, int csrno)
278 {
279 if (riscv_cpu_mxl(env) != MXL_RV32) {
280 return RISCV_EXCP_ILLEGAL_INST;
281 }
282
283 return smcntrpmf(env, csrno);
284 }
285
any(CPURISCVState * env,int csrno)286 static RISCVException any(CPURISCVState *env, int csrno)
287 {
288 return RISCV_EXCP_NONE;
289 }
290
any32(CPURISCVState * env,int csrno)291 static RISCVException any32(CPURISCVState *env, int csrno)
292 {
293 if (riscv_cpu_mxl(env) != MXL_RV32) {
294 return RISCV_EXCP_ILLEGAL_INST;
295 }
296
297 return any(env, csrno);
298
299 }
300
aia_any(CPURISCVState * env,int csrno)301 static RISCVException aia_any(CPURISCVState *env, int csrno)
302 {
303 if (!riscv_cpu_cfg(env)->ext_smaia) {
304 return RISCV_EXCP_ILLEGAL_INST;
305 }
306
307 return any(env, csrno);
308 }
309
aia_any32(CPURISCVState * env,int csrno)310 static RISCVException aia_any32(CPURISCVState *env, int csrno)
311 {
312 if (!riscv_cpu_cfg(env)->ext_smaia) {
313 return RISCV_EXCP_ILLEGAL_INST;
314 }
315
316 return any32(env, csrno);
317 }
318
csrind_any(CPURISCVState * env,int csrno)319 static RISCVException csrind_any(CPURISCVState *env, int csrno)
320 {
321 if (!riscv_cpu_cfg(env)->ext_smcsrind) {
322 return RISCV_EXCP_ILLEGAL_INST;
323 }
324
325 return RISCV_EXCP_NONE;
326 }
327
csrind_or_aia_any(CPURISCVState * env,int csrno)328 static RISCVException csrind_or_aia_any(CPURISCVState *env, int csrno)
329 {
330 if (!riscv_cpu_cfg(env)->ext_smaia && !riscv_cpu_cfg(env)->ext_smcsrind) {
331 return RISCV_EXCP_ILLEGAL_INST;
332 }
333
334 return any(env, csrno);
335 }
336
smode(CPURISCVState * env,int csrno)337 static RISCVException smode(CPURISCVState *env, int csrno)
338 {
339 if (riscv_has_ext(env, RVS)) {
340 return RISCV_EXCP_NONE;
341 }
342
343 return RISCV_EXCP_ILLEGAL_INST;
344 }
345
smode32(CPURISCVState * env,int csrno)346 static RISCVException smode32(CPURISCVState *env, int csrno)
347 {
348 if (riscv_cpu_mxl(env) != MXL_RV32) {
349 return RISCV_EXCP_ILLEGAL_INST;
350 }
351
352 return smode(env, csrno);
353 }
354
aia_smode(CPURISCVState * env,int csrno)355 static RISCVException aia_smode(CPURISCVState *env, int csrno)
356 {
357 int ret;
358
359 if (!riscv_cpu_cfg(env)->ext_ssaia) {
360 return RISCV_EXCP_ILLEGAL_INST;
361 }
362
363 if (csrno == CSR_STOPEI) {
364 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC);
365 } else {
366 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
367 }
368
369 if (ret != RISCV_EXCP_NONE) {
370 return ret;
371 }
372
373 return smode(env, csrno);
374 }
375
aia_smode32(CPURISCVState * env,int csrno)376 static RISCVException aia_smode32(CPURISCVState *env, int csrno)
377 {
378 int ret;
379 int csr_priv = get_field(csrno, 0x300);
380
381 if (csr_priv == PRV_M && !riscv_cpu_cfg(env)->ext_smaia) {
382 return RISCV_EXCP_ILLEGAL_INST;
383 } else if (!riscv_cpu_cfg(env)->ext_ssaia) {
384 return RISCV_EXCP_ILLEGAL_INST;
385 }
386
387 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
388 if (ret != RISCV_EXCP_NONE) {
389 return ret;
390 }
391
392 return smode32(env, csrno);
393 }
394
scountinhibit_pred(CPURISCVState * env,int csrno)395 static RISCVException scountinhibit_pred(CPURISCVState *env, int csrno)
396 {
397 RISCVCPU *cpu = env_archcpu(env);
398
399 if (!cpu->cfg.ext_ssccfg || !cpu->cfg.ext_smcdeleg) {
400 return RISCV_EXCP_ILLEGAL_INST;
401 }
402
403 if (env->virt_enabled) {
404 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
405 }
406
407 return smode(env, csrno);
408 }
409
csrind_extensions_present(CPURISCVState * env)410 static bool csrind_extensions_present(CPURISCVState *env)
411 {
412 return riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind;
413 }
414
aia_extensions_present(CPURISCVState * env)415 static bool aia_extensions_present(CPURISCVState *env)
416 {
417 return riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_ssaia;
418 }
419
csrind_or_aia_extensions_present(CPURISCVState * env)420 static bool csrind_or_aia_extensions_present(CPURISCVState *env)
421 {
422 return csrind_extensions_present(env) || aia_extensions_present(env);
423 }
424
csrind_smode(CPURISCVState * env,int csrno)425 static RISCVException csrind_smode(CPURISCVState *env, int csrno)
426 {
427 if (!csrind_extensions_present(env)) {
428 return RISCV_EXCP_ILLEGAL_INST;
429 }
430
431 return smode(env, csrno);
432 }
433
csrind_or_aia_smode(CPURISCVState * env,int csrno)434 static RISCVException csrind_or_aia_smode(CPURISCVState *env, int csrno)
435 {
436 if (!csrind_or_aia_extensions_present(env)) {
437 return RISCV_EXCP_ILLEGAL_INST;
438 }
439
440 return smode(env, csrno);
441 }
442
hmode(CPURISCVState * env,int csrno)443 static RISCVException hmode(CPURISCVState *env, int csrno)
444 {
445 if (riscv_has_ext(env, RVH)) {
446 return RISCV_EXCP_NONE;
447 }
448
449 return RISCV_EXCP_ILLEGAL_INST;
450 }
451
hmode32(CPURISCVState * env,int csrno)452 static RISCVException hmode32(CPURISCVState *env, int csrno)
453 {
454 if (riscv_cpu_mxl(env) != MXL_RV32) {
455 return RISCV_EXCP_ILLEGAL_INST;
456 }
457
458 return hmode(env, csrno);
459
460 }
461
csrind_hmode(CPURISCVState * env,int csrno)462 static RISCVException csrind_hmode(CPURISCVState *env, int csrno)
463 {
464 if (!csrind_extensions_present(env)) {
465 return RISCV_EXCP_ILLEGAL_INST;
466 }
467
468 return hmode(env, csrno);
469 }
470
csrind_or_aia_hmode(CPURISCVState * env,int csrno)471 static RISCVException csrind_or_aia_hmode(CPURISCVState *env, int csrno)
472 {
473 if (!csrind_or_aia_extensions_present(env)) {
474 return RISCV_EXCP_ILLEGAL_INST;
475 }
476
477 return hmode(env, csrno);
478 }
479
umode(CPURISCVState * env,int csrno)480 static RISCVException umode(CPURISCVState *env, int csrno)
481 {
482 if (riscv_has_ext(env, RVU)) {
483 return RISCV_EXCP_NONE;
484 }
485
486 return RISCV_EXCP_ILLEGAL_INST;
487 }
488
umode32(CPURISCVState * env,int csrno)489 static RISCVException umode32(CPURISCVState *env, int csrno)
490 {
491 if (riscv_cpu_mxl(env) != MXL_RV32) {
492 return RISCV_EXCP_ILLEGAL_INST;
493 }
494
495 return umode(env, csrno);
496 }
497
mstateen(CPURISCVState * env,int csrno)498 static RISCVException mstateen(CPURISCVState *env, int csrno)
499 {
500 if (!riscv_cpu_cfg(env)->ext_smstateen) {
501 return RISCV_EXCP_ILLEGAL_INST;
502 }
503
504 return any(env, csrno);
505 }
506
/*
 * Common predicate for the hstateen CSRs; @base is the first CSR of the
 * group (CSR_HSTATEEN0 or CSR_HSTATEEN0H) so csrno - base indexes the
 * corresponding mstateen register.
 */
static RISCVException hstateen_pred(CPURISCVState *env, int csrno, int base)
{
    if (!riscv_cpu_cfg(env)->ext_smstateen) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    RISCVException ret = hmode(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* The debugger bypasses the stateen gate entirely. */
    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    /* Below M-mode, mstateen.SE0 must delegate the register downward. */
    if (env->priv < PRV_M) {
        if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEEN)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }

    return RISCV_EXCP_NONE;
}
530
hstateen(CPURISCVState * env,int csrno)531 static RISCVException hstateen(CPURISCVState *env, int csrno)
532 {
533 return hstateen_pred(env, csrno, CSR_HSTATEEN0);
534 }
535
hstateenh(CPURISCVState * env,int csrno)536 static RISCVException hstateenh(CPURISCVState *env, int csrno)
537 {
538 return hstateen_pred(env, csrno, CSR_HSTATEEN0H);
539 }
540
/* Predicate for the sstateen CSRs. */
static RISCVException sstateen(CPURISCVState *env, int csrno)
{
    bool virt = env->virt_enabled;
    /* Index of the sstateen register within the group. */
    int index = csrno - CSR_SSTATEEN0;

    if (!riscv_cpu_cfg(env)->ext_smstateen) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    RISCVException ret = smode(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* The debugger bypasses the stateen delegation checks. */
    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    if (env->priv < PRV_M) {
        /* mstateen.SE0 must delegate the register below M-mode... */
        if (!(env->mstateen[index] & SMSTATEEN_STATEEN)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }

        /* ...and hstateen.SE0 must delegate it into the guest. */
        if (virt) {
            if (!(env->hstateen[index] & SMSTATEEN_STATEEN)) {
                return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
            }
        }
    }

    return RISCV_EXCP_NONE;
}
573
/*
 * Predicate for the Sstc stimecmp/vstimecmp CSRs (and RV32 high halves):
 * requires the extension, a timer backend, and the TM/STCE enable chain.
 */
static RISCVException sstc(CPURISCVState *env, int csrno)
{
    bool hmode_check = false;

    /* Sstc is useless without a platform-provided rdtime backend. */
    if (!riscv_cpu_cfg(env)->ext_sstc || !env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* vstimecmp/vstimecmph are hypervisor registers. */
    if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) {
        hmode_check = true;
    }

    RISCVException ret = hmode_check ? hmode(env, csrno) : smode(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    /* M-mode is never gated by the counteren/envcfg enables below. */
    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    }

    /*
     * No need of separate function for rv32 as menvcfg stores both menvcfg
     * menvcfgh for RV32.
     */
    if (!(get_field(env->mcounteren, COUNTEREN_TM) &&
          get_field(env->menvcfg, MENVCFG_STCE))) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* Guests are additionally gated by hcounteren.TM and henvcfg.STCE. */
    if (env->virt_enabled) {
        if (!(get_field(env->hcounteren, COUNTEREN_TM) &&
              get_field(env->henvcfg, HENVCFG_STCE))) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    return RISCV_EXCP_NONE;
}
617
sstc_32(CPURISCVState * env,int csrno)618 static RISCVException sstc_32(CPURISCVState *env, int csrno)
619 {
620 if (riscv_cpu_mxl(env) != MXL_RV32) {
621 return RISCV_EXCP_ILLEGAL_INST;
622 }
623
624 return sstc(env, csrno);
625 }
626
satp(CPURISCVState * env,int csrno)627 static RISCVException satp(CPURISCVState *env, int csrno)
628 {
629 if (env->priv == PRV_S && !env->virt_enabled &&
630 get_field(env->mstatus, MSTATUS_TVM)) {
631 return RISCV_EXCP_ILLEGAL_INST;
632 }
633 if (env->priv == PRV_S && env->virt_enabled &&
634 get_field(env->hstatus, HSTATUS_VTVM)) {
635 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
636 }
637
638 return smode(env, csrno);
639 }
640
hgatp(CPURISCVState * env,int csrno)641 static RISCVException hgatp(CPURISCVState *env, int csrno)
642 {
643 if (env->priv == PRV_S && !env->virt_enabled &&
644 get_field(env->mstatus, MSTATUS_TVM)) {
645 return RISCV_EXCP_ILLEGAL_INST;
646 }
647
648 return hmode(env, csrno);
649 }
650
651 /*
652 * M-mode:
653 * Without ext_smctr raise illegal inst excep.
654 * Otherwise everything is accessible to m-mode.
655 *
656 * S-mode:
657 * Without ext_ssctr or mstateen.ctr raise illegal inst excep.
658 * Otherwise everything other than mctrctl is accessible.
659 *
660 * VS-mode:
661 * Without ext_ssctr or mstateen.ctr raise illegal inst excep.
662 * Without hstateen.ctr raise virtual illegal inst excep.
663 * Otherwise allow sctrctl (vsctrctl), sctrstatus, 0x200-0x2ff entry range.
664 * Always raise illegal instruction exception for sctrdepth.
665 */
ctr_mmode(CPURISCVState * env,int csrno)666 static RISCVException ctr_mmode(CPURISCVState *env, int csrno)
667 {
668 /* Check if smctr-ext is present */
669 if (riscv_cpu_cfg(env)->ext_smctr) {
670 return RISCV_EXCP_NONE;
671 }
672
673 return RISCV_EXCP_ILLEGAL_INST;
674 }
675
ctr_smode(CPURISCVState * env,int csrno)676 static RISCVException ctr_smode(CPURISCVState *env, int csrno)
677 {
678 const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
679
680 if (!cfg->ext_smctr && !cfg->ext_ssctr) {
681 return RISCV_EXCP_ILLEGAL_INST;
682 }
683
684 RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_CTR);
685 if (ret == RISCV_EXCP_NONE && csrno == CSR_SCTRDEPTH &&
686 env->virt_enabled) {
687 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
688 }
689
690 return ret;
691 }
692
aia_hmode(CPURISCVState * env,int csrno)693 static RISCVException aia_hmode(CPURISCVState *env, int csrno)
694 {
695 int ret;
696
697 if (!riscv_cpu_cfg(env)->ext_ssaia) {
698 return RISCV_EXCP_ILLEGAL_INST;
699 }
700
701 if (csrno == CSR_VSTOPEI) {
702 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC);
703 } else {
704 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
705 }
706
707 if (ret != RISCV_EXCP_NONE) {
708 return ret;
709 }
710
711 return hmode(env, csrno);
712 }
713
aia_hmode32(CPURISCVState * env,int csrno)714 static RISCVException aia_hmode32(CPURISCVState *env, int csrno)
715 {
716 int ret;
717
718 if (!riscv_cpu_cfg(env)->ext_ssaia) {
719 return RISCV_EXCP_ILLEGAL_INST;
720 }
721
722 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
723 if (ret != RISCV_EXCP_NONE) {
724 return ret;
725 }
726
727 if (!riscv_cpu_cfg(env)->ext_ssaia) {
728 return RISCV_EXCP_ILLEGAL_INST;
729 }
730
731 return hmode32(env, csrno);
732 }
733
dbltrp_hmode(CPURISCVState * env,int csrno)734 static RISCVException dbltrp_hmode(CPURISCVState *env, int csrno)
735 {
736 if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
737 return RISCV_EXCP_NONE;
738 }
739
740 return hmode(env, csrno);
741 }
742
pmp(CPURISCVState * env,int csrno)743 static RISCVException pmp(CPURISCVState *env, int csrno)
744 {
745 if (riscv_cpu_cfg(env)->pmp) {
746 int max_pmpcfg = (env->priv_ver >= PRIV_VERSION_1_12_0) ?
747 + CSR_PMPCFG15 : CSR_PMPCFG3;
748
749 if (csrno <= max_pmpcfg) {
750 uint32_t reg_index = csrno - CSR_PMPCFG0;
751
752 /* TODO: RV128 restriction check */
753 if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
754 return RISCV_EXCP_ILLEGAL_INST;
755 }
756 }
757
758 return RISCV_EXCP_NONE;
759 }
760
761 return RISCV_EXCP_ILLEGAL_INST;
762 }
763
have_mseccfg(CPURISCVState * env,int csrno)764 static RISCVException have_mseccfg(CPURISCVState *env, int csrno)
765 {
766 if (riscv_cpu_cfg(env)->ext_smepmp) {
767 return RISCV_EXCP_NONE;
768 }
769 if (riscv_cpu_cfg(env)->ext_zkr) {
770 return RISCV_EXCP_NONE;
771 }
772 if (riscv_cpu_cfg(env)->ext_smmpm) {
773 return RISCV_EXCP_NONE;
774 }
775
776 return RISCV_EXCP_ILLEGAL_INST;
777 }
778
debug(CPURISCVState * env,int csrno)779 static RISCVException debug(CPURISCVState *env, int csrno)
780 {
781 if (riscv_cpu_cfg(env)->debug) {
782 return RISCV_EXCP_NONE;
783 }
784
785 return RISCV_EXCP_ILLEGAL_INST;
786 }
787
rnmi(CPURISCVState * env,int csrno)788 static RISCVException rnmi(CPURISCVState *env, int csrno)
789 {
790 RISCVCPU *cpu = env_archcpu(env);
791
792 if (cpu->cfg.ext_smrnmi) {
793 return RISCV_EXCP_NONE;
794 }
795
796 return RISCV_EXCP_ILLEGAL_INST;
797 }
798 #endif
799
/* Predicate for the Zkr seed CSR, gated by mseccfg.SSEED/USEED. */
static RISCVException seed(CPURISCVState *env, int csrno)
{
    if (!riscv_cpu_cfg(env)->ext_zkr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

#if !defined(CONFIG_USER_ONLY)
    /* The debugger may always read the seed CSR. */
    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    /*
     * With a CSR read-write instruction:
     * 1) The seed CSR is always available in machine mode as normal.
     * 2) Attempted access to seed from virtual modes VS and VU always raises
     * an exception(virtual instruction exception only if mseccfg.sseed=1).
     * 3) Without the corresponding access control bit set to 1, any attempted
     * access to seed from U, S or HS modes will raise an illegal instruction
     * exception.
     */
    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    } else if (env->virt_enabled) {
        /* SSEED chooses which flavor of exception the guest receives. */
        if (env->mseccfg & MSECCFG_SSEED) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        } else {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    } else {
        if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) {
            return RISCV_EXCP_NONE;
        } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) {
            return RISCV_EXCP_NONE;
        } else {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }
#else
    return RISCV_EXCP_NONE;
#endif
}
841
842 /* zicfiss CSR_SSP read and write */
read_ssp(CPURISCVState * env,int csrno,target_ulong * val)843 static RISCVException read_ssp(CPURISCVState *env, int csrno,
844 target_ulong *val)
845 {
846 *val = env->ssp;
847 return RISCV_EXCP_NONE;
848 }
849
write_ssp(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)850 static RISCVException write_ssp(CPURISCVState *env, int csrno,
851 target_ulong val, uintptr_t ra)
852 {
853 env->ssp = val;
854 return RISCV_EXCP_NONE;
855 }
856
857 /* User Floating-Point CSRs */
read_fflags(CPURISCVState * env,int csrno,target_ulong * val)858 static RISCVException read_fflags(CPURISCVState *env, int csrno,
859 target_ulong *val)
860 {
861 *val = riscv_cpu_get_fflags(env);
862 return RISCV_EXCP_NONE;
863 }
864
write_fflags(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)865 static RISCVException write_fflags(CPURISCVState *env, int csrno,
866 target_ulong val, uintptr_t ra)
867 {
868 #if !defined(CONFIG_USER_ONLY)
869 if (riscv_has_ext(env, RVF)) {
870 env->mstatus |= MSTATUS_FS;
871 }
872 #endif
873 riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
874 return RISCV_EXCP_NONE;
875 }
876
read_frm(CPURISCVState * env,int csrno,target_ulong * val)877 static RISCVException read_frm(CPURISCVState *env, int csrno,
878 target_ulong *val)
879 {
880 *val = env->frm;
881 return RISCV_EXCP_NONE;
882 }
883
write_frm(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)884 static RISCVException write_frm(CPURISCVState *env, int csrno,
885 target_ulong val, uintptr_t ra)
886 {
887 #if !defined(CONFIG_USER_ONLY)
888 if (riscv_has_ext(env, RVF)) {
889 env->mstatus |= MSTATUS_FS;
890 }
891 #endif
892 env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
893 return RISCV_EXCP_NONE;
894 }
895
read_fcsr(CPURISCVState * env,int csrno,target_ulong * val)896 static RISCVException read_fcsr(CPURISCVState *env, int csrno,
897 target_ulong *val)
898 {
899 *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
900 | (env->frm << FSR_RD_SHIFT);
901 return RISCV_EXCP_NONE;
902 }
903
write_fcsr(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)904 static RISCVException write_fcsr(CPURISCVState *env, int csrno,
905 target_ulong val, uintptr_t ra)
906 {
907 #if !defined(CONFIG_USER_ONLY)
908 if (riscv_has_ext(env, RVF)) {
909 env->mstatus |= MSTATUS_FS;
910 }
911 #endif
912 env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
913 riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
914 return RISCV_EXCP_NONE;
915 }
916
read_vtype(CPURISCVState * env,int csrno,target_ulong * val)917 static RISCVException read_vtype(CPURISCVState *env, int csrno,
918 target_ulong *val)
919 {
920 uint64_t vill;
921 switch (env->xl) {
922 case MXL_RV32:
923 vill = (uint32_t)env->vill << 31;
924 break;
925 case MXL_RV64:
926 vill = (uint64_t)env->vill << 63;
927 break;
928 default:
929 g_assert_not_reached();
930 }
931 *val = (target_ulong)vill | env->vtype;
932 return RISCV_EXCP_NONE;
933 }
934
read_vl(CPURISCVState * env,int csrno,target_ulong * val)935 static RISCVException read_vl(CPURISCVState *env, int csrno,
936 target_ulong *val)
937 {
938 *val = env->vl;
939 return RISCV_EXCP_NONE;
940 }
941
read_vlenb(CPURISCVState * env,int csrno,target_ulong * val)942 static RISCVException read_vlenb(CPURISCVState *env, int csrno,
943 target_ulong *val)
944 {
945 *val = riscv_cpu_cfg(env)->vlenb;
946 return RISCV_EXCP_NONE;
947 }
948
read_vxrm(CPURISCVState * env,int csrno,target_ulong * val)949 static RISCVException read_vxrm(CPURISCVState *env, int csrno,
950 target_ulong *val)
951 {
952 *val = env->vxrm;
953 return RISCV_EXCP_NONE;
954 }
955
write_vxrm(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)956 static RISCVException write_vxrm(CPURISCVState *env, int csrno,
957 target_ulong val, uintptr_t ra)
958 {
959 #if !defined(CONFIG_USER_ONLY)
960 env->mstatus |= MSTATUS_VS;
961 #endif
962 env->vxrm = val;
963 return RISCV_EXCP_NONE;
964 }
965
read_vxsat(CPURISCVState * env,int csrno,target_ulong * val)966 static RISCVException read_vxsat(CPURISCVState *env, int csrno,
967 target_ulong *val)
968 {
969 *val = env->vxsat & BIT(0);
970 return RISCV_EXCP_NONE;
971 }
972
write_vxsat(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)973 static RISCVException write_vxsat(CPURISCVState *env, int csrno,
974 target_ulong val, uintptr_t ra)
975 {
976 #if !defined(CONFIG_USER_ONLY)
977 env->mstatus |= MSTATUS_VS;
978 #endif
979 env->vxsat = val & BIT(0);
980 return RISCV_EXCP_NONE;
981 }
982
read_vstart(CPURISCVState * env,int csrno,target_ulong * val)983 static RISCVException read_vstart(CPURISCVState *env, int csrno,
984 target_ulong *val)
985 {
986 *val = env->vstart;
987 return RISCV_EXCP_NONE;
988 }
989
/* Write the vector start index; dirties the vector state. */
static RISCVException write_vstart(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    /*
     * The vstart CSR is defined to have only enough writable bits
     * to hold the largest element index, i.e. lg2(VLEN) bits.
     * vlenb << 3 converts bytes to bits; ctzl of that power of two
     * gives lg2(VLEN), from which the writable-bit mask is built.
     */
    env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlenb << 3));
    return RISCV_EXCP_NONE;
}
1003
read_vcsr(CPURISCVState * env,int csrno,target_ulong * val)1004 static RISCVException read_vcsr(CPURISCVState *env, int csrno,
1005 target_ulong *val)
1006 {
1007 *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
1008 return RISCV_EXCP_NONE;
1009 }
1010
write_vcsr(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1011 static RISCVException write_vcsr(CPURISCVState *env, int csrno,
1012 target_ulong val, uintptr_t ra)
1013 {
1014 #if !defined(CONFIG_USER_ONLY)
1015 env->mstatus |= MSTATUS_VS;
1016 #endif
1017 env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
1018 env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
1019 return RISCV_EXCP_NONE;
1020 }
1021
1022 #if defined(CONFIG_USER_ONLY)
1023 /* User Timers and Counters */
get_ticks(bool shift)1024 static target_ulong get_ticks(bool shift)
1025 {
1026 int64_t val = cpu_get_host_ticks();
1027 target_ulong result = shift ? val >> 32 : val;
1028
1029 return result;
1030 }
1031
read_time(CPURISCVState * env,int csrno,target_ulong * val)1032 static RISCVException read_time(CPURISCVState *env, int csrno,
1033 target_ulong *val)
1034 {
1035 *val = cpu_get_host_ticks();
1036 return RISCV_EXCP_NONE;
1037 }
1038
read_timeh(CPURISCVState * env,int csrno,target_ulong * val)1039 static RISCVException read_timeh(CPURISCVState *env, int csrno,
1040 target_ulong *val)
1041 {
1042 *val = cpu_get_host_ticks() >> 32;
1043 return RISCV_EXCP_NONE;
1044 }
1045
read_hpmcounter(CPURISCVState * env,int csrno,target_ulong * val)1046 static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
1047 target_ulong *val)
1048 {
1049 *val = get_ticks(false);
1050 return RISCV_EXCP_NONE;
1051 }
1052
read_hpmcounterh(CPURISCVState * env,int csrno,target_ulong * val)1053 static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
1054 target_ulong *val)
1055 {
1056 *val = get_ticks(true);
1057 return RISCV_EXCP_NONE;
1058 }
1059
1060 #else /* CONFIG_USER_ONLY */
1061
read_mcyclecfg(CPURISCVState * env,int csrno,target_ulong * val)1062 static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno,
1063 target_ulong *val)
1064 {
1065 *val = env->mcyclecfg;
1066 return RISCV_EXCP_NONE;
1067 }
1068
/*
 * Write mcyclecfg. On RV64 only the MINH bit plus the xINH bits for
 * privilege modes that actually exist are kept; on RV32 this register
 * holds the low half and is stored as-is (the filter bits live in
 * mcyclecfgh).
 */
static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    uint64_t inh_avail_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->mcyclecfg = val;
    } else {
        /* Set xINH fields if priv mode supported */
        inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MCYCLECFG_BIT_MINH;
        inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFG_BIT_UINH : 0;
        inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFG_BIT_SINH : 0;
        /* VU/VS inhibit bits exist only with the hypervisor extension. */
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVU)) ? MCYCLECFG_BIT_VUINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVS)) ? MCYCLECFG_BIT_VSINH : 0;
        env->mcyclecfg = val & inh_avail_mask;
    }

    return RISCV_EXCP_NONE;
}
1090
read_mcyclecfgh(CPURISCVState * env,int csrno,target_ulong * val)1091 static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno,
1092 target_ulong *val)
1093 {
1094 *val = env->mcyclecfgh;
1095 return RISCV_EXCP_NONE;
1096 }
1097
/*
 * Write mcyclecfgh (RV32 high half): keep only MINH plus the xINH bits
 * for privilege modes the CPU actually implements.
 */
static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno,
                                       target_ulong val, uintptr_t ra)
{
    target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
                                                  MCYCLECFGH_BIT_MINH);

    /* Set xINH fields if priv mode supported */
    inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFGH_BIT_UINH : 0;
    inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFGH_BIT_SINH : 0;
    /* VU/VS inhibit bits exist only with the hypervisor extension. */
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVU)) ? MCYCLECFGH_BIT_VUINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVS)) ? MCYCLECFGH_BIT_VSINH : 0;

    env->mcyclecfgh = val & inh_avail_mask;
    return RISCV_EXCP_NONE;
}
1115
read_minstretcfg(CPURISCVState * env,int csrno,target_ulong * val)1116 static RISCVException read_minstretcfg(CPURISCVState *env, int csrno,
1117 target_ulong *val)
1118 {
1119 *val = env->minstretcfg;
1120 return RISCV_EXCP_NONE;
1121 }
1122
write_minstretcfg(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1123 static RISCVException write_minstretcfg(CPURISCVState *env, int csrno,
1124 target_ulong val, uintptr_t ra)
1125 {
1126 uint64_t inh_avail_mask;
1127
1128 if (riscv_cpu_mxl(env) == MXL_RV32) {
1129 env->minstretcfg = val;
1130 } else {
1131 inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MINSTRETCFG_BIT_MINH;
1132 inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFG_BIT_UINH : 0;
1133 inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFG_BIT_SINH : 0;
1134 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
1135 riscv_has_ext(env, RVU)) ? MINSTRETCFG_BIT_VUINH : 0;
1136 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
1137 riscv_has_ext(env, RVS)) ? MINSTRETCFG_BIT_VSINH : 0;
1138 env->minstretcfg = val & inh_avail_mask;
1139 }
1140 return RISCV_EXCP_NONE;
1141 }
1142
read_minstretcfgh(CPURISCVState * env,int csrno,target_ulong * val)1143 static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno,
1144 target_ulong *val)
1145 {
1146 *val = env->minstretcfgh;
1147 return RISCV_EXCP_NONE;
1148 }
1149
write_minstretcfgh(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1150 static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno,
1151 target_ulong val, uintptr_t ra)
1152 {
1153 target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
1154 MINSTRETCFGH_BIT_MINH);
1155
1156 inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFGH_BIT_UINH : 0;
1157 inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFGH_BIT_SINH : 0;
1158 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
1159 riscv_has_ext(env, RVU)) ? MINSTRETCFGH_BIT_VUINH : 0;
1160 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
1161 riscv_has_ext(env, RVS)) ? MINSTRETCFGH_BIT_VSINH : 0;
1162
1163 env->minstretcfgh = val & inh_avail_mask;
1164 return RISCV_EXCP_NONE;
1165 }
1166
read_mhpmevent(CPURISCVState * env,int csrno,target_ulong * val)1167 static RISCVException read_mhpmevent(CPURISCVState *env, int csrno,
1168 target_ulong *val)
1169 {
1170 int evt_index = csrno - CSR_MCOUNTINHIBIT;
1171
1172 *val = env->mhpmevent_val[evt_index];
1173
1174 return RISCV_EXCP_NONE;
1175 }
1176
/*
 * Write an mhpmevent CSR and refresh the PMU event map with the full
 * 64-bit selector value.
 */
static RISCVException write_mhpmevent(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    int evt_index = csrno - CSR_MCOUNTINHIBIT;
    uint64_t mhpmevt_val = val;
    uint64_t inh_avail_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        /*
         * RV32: store the low half verbatim (filter bits live in
         * mhpmeventh) and combine with the high half for the event map.
         */
        env->mhpmevent_val[evt_index] = val;
        mhpmevt_val = mhpmevt_val |
                      ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
    } else {
        /* Mask off xINH bits for privilege modes that do not exist. */
        inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MHPMEVENT_BIT_MINH;
        inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENT_BIT_UINH : 0;
        inh_avail_mask |= riscv_has_ext(env, RVS) ? MHPMEVENT_BIT_SINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVU)) ? MHPMEVENT_BIT_VUINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVS)) ? MHPMEVENT_BIT_VSINH : 0;
        mhpmevt_val = val & inh_avail_mask;
        env->mhpmevent_val[evt_index] = mhpmevt_val;
    }

    /* Keep the event-to-counter mapping in sync with the new selector. */
    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}
1204
read_mhpmeventh(CPURISCVState * env,int csrno,target_ulong * val)1205 static RISCVException read_mhpmeventh(CPURISCVState *env, int csrno,
1206 target_ulong *val)
1207 {
1208 int evt_index = csrno - CSR_MHPMEVENT3H + 3;
1209
1210 *val = env->mhpmeventh_val[evt_index];
1211
1212 return RISCV_EXCP_NONE;
1213 }
1214
/*
 * Write the high half of an event selector (RV32) and refresh the PMU
 * event map with the combined 64-bit value.
 */
static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    int evt_index = csrno - CSR_MHPMEVENT3H + 3;
    uint64_t mhpmevth_val;
    uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];
    target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
                                                  MHPMEVENTH_BIT_MINH);

    /* Accept xINH bits only for privilege modes that exist. */
    inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENTH_BIT_UINH : 0;
    inh_avail_mask |= riscv_has_ext(env, RVS) ? MHPMEVENTH_BIT_SINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVU)) ? MHPMEVENTH_BIT_VUINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVS)) ? MHPMEVENTH_BIT_VSINH : 0;

    mhpmevth_val = val & inh_avail_mask;
    /* Combine with the already-stored low half for the event map. */
    mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
    env->mhpmeventh_val[evt_index] = mhpmevth_val;

    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}
1239
/*
 * Return the current value of counter @counter_idx (cycle at index 0,
 * instret at index 2, programmable otherwise), honouring the per-privilege
 * xINH inhibit bits from the corresponding config/event CSR.
 *
 * NOTE(review): the MCYCLECFG_BIT_*INH constants are applied to all three
 * CSR flavours below -- this assumes the inhibit bit positions are
 * identical across MCYCLECFG/MINSTRETCFG/MHPMEVENT; confirm against the
 * bit definitions.
 */
static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env,
                                                         int counter_idx,
                                                         bool upper_half)
{
    int inst = riscv_pmu_ctr_monitor_instructions(env, counter_idx);
    uint64_t *counter_arr_virt = env->pmu_fixed_ctrs[inst].counter_virt;
    uint64_t *counter_arr = env->pmu_fixed_ctrs[inst].counter;
    target_ulong result = 0;
    uint64_t curr_val = 0;
    uint64_t cfg_val = 0;

    /* Pick the filter configuration matching this counter. */
    if (counter_idx == 0) {
        cfg_val = upper_half ? ((uint64_t)env->mcyclecfgh << 32) :
                  env->mcyclecfg;
    } else if (counter_idx == 2) {
        cfg_val = upper_half ? ((uint64_t)env->minstretcfgh << 32) :
                  env->minstretcfg;
    } else {
        cfg_val = upper_half ?
                  ((uint64_t)env->mhpmeventh_val[counter_idx] << 32) :
                  env->mhpmevent_val[counter_idx];
        cfg_val &= MHPMEVENT_FILTER_MASK;
    }

    /* No filtering requested: report the raw global count. */
    if (!cfg_val) {
        if (icount_enabled()) {
            curr_val = inst ? icount_get_raw() : icount_get();
        } else {
            curr_val = cpu_get_host_ticks();
        }

        goto done;
    }

    /* Update counter before reading. */
    riscv_pmu_update_fixed_ctrs(env, env->priv, env->virt_enabled);

    /* Sum the per-privilege buckets that are not inhibited. */
    if (!(cfg_val & MCYCLECFG_BIT_MINH)) {
        curr_val += counter_arr[PRV_M];
    }

    if (!(cfg_val & MCYCLECFG_BIT_SINH)) {
        curr_val += counter_arr[PRV_S];
    }

    if (!(cfg_val & MCYCLECFG_BIT_UINH)) {
        curr_val += counter_arr[PRV_U];
    }

    if (!(cfg_val & MCYCLECFG_BIT_VSINH)) {
        curr_val += counter_arr_virt[PRV_S];
    }

    if (!(cfg_val & MCYCLECFG_BIT_VUINH)) {
        curr_val += counter_arr_virt[PRV_U];
    }

done:
    /* RV32 CSRs expose 32 bits at a time; RV64 returns the full value. */
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        result = upper_half ? curr_val >> 32 : curr_val;
    } else {
        result = curr_val;
    }

    return result;
}
1306
/*
 * Store @val into PMU counter @ctr_idx and, when the counter is actively
 * monitoring cycles or instructions, re-baseline its snapshot and re-arm
 * any overflow timer.
 */
static RISCVException riscv_pmu_write_ctr(CPURISCVState *env, target_ulong val,
                                          uint32_t ctr_idx)
{
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = val;

    counter->mhpmcounter_val = val;
    if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
        (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
         riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
        /*
         * Snapshot the fixed-counter source so future reads report the
         * delta since this write, added to the value just written.
         */
        counter->mhpmcounter_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
                                                              ctr_idx, false);
        if (ctr_idx > 2) {
            /* Programmable counters may need an overflow timer. */
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                /* RV32: include the previously-written high half. */
                mhpmctr_val = mhpmctr_val |
                              ((uint64_t)counter->mhpmcounterh_val << 32);
            }
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        /* Other counters can keep incrementing from the given value */
        counter->mhpmcounter_prev = val;
    }

    return RISCV_EXCP_NONE;
}
1333
/*
 * Store the high half (@val) of PMU counter @ctr_idx (RV32) and, when the
 * counter is actively monitoring, re-baseline its snapshot and re-arm any
 * overflow timer with the combined 64-bit value.
 */
static RISCVException riscv_pmu_write_ctrh(CPURISCVState *env, target_ulong val,
                                           uint32_t ctr_idx)
{
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = counter->mhpmcounter_val;
    uint64_t mhpmctrh_val = val;

    counter->mhpmcounterh_val = val;
    /* Combine with the stored low half for the timer computation. */
    mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
    if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
        (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
         riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
        /* Snapshot the upper half of the fixed-counter source. */
        counter->mhpmcounterh_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
                                                               ctr_idx, true);
        if (ctr_idx > 2) {
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        /* Inhibited/unmonitored counters keep the written value. */
        counter->mhpmcounterh_prev = val;
    }

    return RISCV_EXCP_NONE;
}
1357
write_mhpmcounter(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1358 static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
1359 target_ulong val, uintptr_t ra)
1360 {
1361 int ctr_idx = csrno - CSR_MCYCLE;
1362
1363 return riscv_pmu_write_ctr(env, val, ctr_idx);
1364 }
1365
write_mhpmcounterh(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1366 static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
1367 target_ulong val, uintptr_t ra)
1368 {
1369 int ctr_idx = csrno - CSR_MCYCLEH;
1370
1371 return riscv_pmu_write_ctrh(env, val, ctr_idx);
1372 }
1373
/*
 * Read PMU counter @ctr_idx (low or high half per @upper_half) into @val,
 * applying the delta accumulated since the last write when the counter is
 * actively monitoring cycles or instructions.
 */
RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
                                  bool upper_half, uint32_t ctr_idx)
{
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    target_ulong ctr_prev = upper_half ? counter->mhpmcounterh_prev :
                                         counter->mhpmcounter_prev;
    target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val :
                                        counter->mhpmcounter_val;

    if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
        /*
         * Counter should not increment if inhibit bit is set. Just return the
         * current counter value.
         */
        *val = ctr_val;
        return RISCV_EXCP_NONE;
    }

    /*
     * The kernel computes the perf delta by subtracting the current value from
     * the value it initialized previously (ctr_val).
     */
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        /* current source reading - snapshot at last write + written value */
        *val = riscv_pmu_ctr_get_fixed_counters_val(env, ctr_idx, upper_half) -
               ctr_prev + ctr_val;
    } else {
        *val = ctr_val;
    }

    return RISCV_EXCP_NONE;
}
1406
read_hpmcounter(CPURISCVState * env,int csrno,target_ulong * val)1407 static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
1408 target_ulong *val)
1409 {
1410 uint16_t ctr_index;
1411
1412 if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
1413 ctr_index = csrno - CSR_MCYCLE;
1414 } else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
1415 ctr_index = csrno - CSR_CYCLE;
1416 } else {
1417 return RISCV_EXCP_ILLEGAL_INST;
1418 }
1419
1420 return riscv_pmu_read_ctr(env, val, false, ctr_index);
1421 }
1422
read_hpmcounterh(CPURISCVState * env,int csrno,target_ulong * val)1423 static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
1424 target_ulong *val)
1425 {
1426 uint16_t ctr_index;
1427
1428 if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) {
1429 ctr_index = csrno - CSR_MCYCLEH;
1430 } else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) {
1431 ctr_index = csrno - CSR_CYCLEH;
1432 } else {
1433 return RISCV_EXCP_ILLEGAL_INST;
1434 }
1435
1436 return riscv_pmu_read_ctr(env, val, true, ctr_index);
1437 }
1438
static int rmw_cd_mhpmcounter(CPURISCVState *env, int ctr_idx,
                              target_ulong *val, target_ulong new_val,
                              target_ulong wr_mask)
{
    /* Only whole-register reads (mask 0) or writes (mask all-ones). */
    if (wr_mask == 0) {
        if (!val) {
            return -EINVAL;
        }
        riscv_pmu_read_ctr(env, val, false, ctr_idx);
        return 0;
    }

    if (wr_mask != -1) {
        return -EINVAL;
    }

    riscv_pmu_write_ctr(env, new_val, ctr_idx);
    return 0;
}
1457
static int rmw_cd_mhpmcounterh(CPURISCVState *env, int ctr_idx,
                               target_ulong *val, target_ulong new_val,
                               target_ulong wr_mask)
{
    /* Only whole-register reads (mask 0) or writes (mask all-ones). */
    if (wr_mask == 0) {
        if (!val) {
            return -EINVAL;
        }
        riscv_pmu_read_ctr(env, val, true, ctr_idx);
        return 0;
    }

    if (wr_mask != -1) {
        return -EINVAL;
    }

    riscv_pmu_write_ctrh(env, new_val, ctr_idx);
    return 0;
}
1476
/*
 * Counter-delegation RMW of an event selector: only whole-register reads
 * (wr_mask == 0) or writes (wr_mask == -1) are accepted, and the M-mode
 * inhibit bit (MINH) is neither visible to nor writable by the delegated
 * context.
 */
static int rmw_cd_mhpmevent(CPURISCVState *env, int evt_index,
                            target_ulong *val, target_ulong new_val,
                            target_ulong wr_mask)
{
    uint64_t mhpmevt_val = new_val;

    if (wr_mask != 0 && wr_mask != -1) {
        return -EINVAL;
    }

    if (!wr_mask && val) {
        *val = env->mhpmevent_val[evt_index];
        if (riscv_cpu_cfg(env)->ext_sscofpmf) {
            /* Hide MINH from the delegated reader. */
            *val &= ~MHPMEVENT_BIT_MINH;
        }
    } else if (wr_mask) {
        /* Delegated writes can never change MINH. */
        wr_mask &= ~MHPMEVENT_BIT_MINH;
        mhpmevt_val = (new_val & wr_mask) |
                      (env->mhpmevent_val[evt_index] & ~wr_mask);
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            /* RV32: combine with the high half for the event map. */
            mhpmevt_val = mhpmevt_val |
                          ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
        }
        env->mhpmevent_val[evt_index] = mhpmevt_val;
        riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
    } else {
        /* wr_mask == 0 but no destination buffer. */
        return -EINVAL;
    }

    return 0;
}
1508
/*
 * Counter-delegation RMW of the high half of an event selector (RV32).
 * Only whole-register reads/writes are accepted and the high-half MINH
 * bit is hidden from / unwritable by the delegated context.
 */
static int rmw_cd_mhpmeventh(CPURISCVState *env, int evt_index,
                             target_ulong *val, target_ulong new_val,
                             target_ulong wr_mask)
{
    uint64_t mhpmevth_val;
    uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];

    if (wr_mask != 0 && wr_mask != -1) {
        return -EINVAL;
    }

    if (!wr_mask && val) {
        *val = env->mhpmeventh_val[evt_index];
        if (riscv_cpu_cfg(env)->ext_sscofpmf) {
            /* Hide MINH from the delegated reader. */
            *val &= ~MHPMEVENTH_BIT_MINH;
        }
    } else if (wr_mask) {
        /* Delegated writes can never change MINH. */
        wr_mask &= ~MHPMEVENTH_BIT_MINH;
        env->mhpmeventh_val[evt_index] =
            (new_val & wr_mask) | (env->mhpmeventh_val[evt_index] & ~wr_mask);
        mhpmevth_val = env->mhpmeventh_val[evt_index];
        /* Rebuild the 64-bit selector and refresh the event map. */
        mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
        riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
    } else {
        /* wr_mask == 0 but no destination buffer. */
        return -EINVAL;
    }

    return 0;
}
1538
/*
 * Counter-delegation RMW of mcyclecfg (index 0) / minstretcfg (index 2).
 * As with rmw_cd_mhpmevent(), the M-mode inhibit bit (MINH) is hidden
 * from delegated reads and masked out of delegated writes.
 */
static int rmw_cd_ctr_cfg(CPURISCVState *env, int cfg_index, target_ulong *val,
                          target_ulong new_val, target_ulong wr_mask)
{
    switch (cfg_index) {
    case 0:         /* CYCLECFG */
        if (wr_mask) {
            wr_mask &= ~MCYCLECFG_BIT_MINH;
            env->mcyclecfg = (new_val & wr_mask) | (env->mcyclecfg & ~wr_mask);
        } else {
            /*
             * Hide MINH from the delegated reader without modifying the
             * stored register. The previous code used "&=" (clobbering
             * the architectural value on a read) with the high-half
             * constant MHPMEVENTH_BIT_MINH, which cleared an unrelated
             * low bit instead of this register's MINH bit.
             */
            *val = env->mcyclecfg & ~MCYCLECFG_BIT_MINH;
        }
        break;
    case 2:         /* INSTRETCFG */
        if (wr_mask) {
            wr_mask &= ~MINSTRETCFG_BIT_MINH;
            env->minstretcfg = (new_val & wr_mask) |
                               (env->minstretcfg & ~wr_mask);
        } else {
            /* Same non-destructive MINH hiding as the CYCLECFG case. */
            *val = env->minstretcfg & ~MINSTRETCFG_BIT_MINH;
        }
        break;
    default:
        return -EINVAL;
    }
    return 0;
}
1565
/*
 * Counter-delegation RMW of mcyclecfgh (index 0) / minstretcfgh (index 2);
 * these high-half registers exist only on RV32. Delegated writes cannot
 * change MINH.
 *
 * NOTE(review): the non-RV32 path returns RISCV_EXCP_ILLEGAL_INST (a
 * positive value) while every other error path in the rmw_cd_* helpers
 * returns -EINVAL -- confirm callers handle both conventions.
 */
static int rmw_cd_ctr_cfgh(CPURISCVState *env, int cfg_index, target_ulong *val,
                           target_ulong new_val, target_ulong wr_mask)
{

    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    switch (cfg_index) {
    case 0:         /* CYCLECFGH */
        if (wr_mask) {
            wr_mask &= ~MCYCLECFGH_BIT_MINH;
            env->mcyclecfgh = (new_val & wr_mask) |
                              (env->mcyclecfgh & ~wr_mask);
        } else {
            *val = env->mcyclecfgh;
        }
        break;
    case 2:         /* INSTRETCFGH */
        if (wr_mask) {
            wr_mask &= ~MINSTRETCFGH_BIT_MINH;
            env->minstretcfgh = (new_val & wr_mask) |
                                (env->minstretcfgh & ~wr_mask);
        } else {
            *val = env->minstretcfgh;
        }
        break;
    default:
        return -EINVAL;
    }
    return 0;
}
1598
1599
read_scountovf(CPURISCVState * env,int csrno,target_ulong * val)1600 static RISCVException read_scountovf(CPURISCVState *env, int csrno,
1601 target_ulong *val)
1602 {
1603 int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT;
1604 int i;
1605 *val = 0;
1606 target_ulong *mhpm_evt_val;
1607 uint64_t of_bit_mask;
1608
1609 /* Virtualize scountovf for counter delegation */
1610 if (riscv_cpu_cfg(env)->ext_sscofpmf &&
1611 riscv_cpu_cfg(env)->ext_ssccfg &&
1612 get_field(env->menvcfg, MENVCFG_CDE) &&
1613 env->virt_enabled) {
1614 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1615 }
1616
1617 if (riscv_cpu_mxl(env) == MXL_RV32) {
1618 mhpm_evt_val = env->mhpmeventh_val;
1619 of_bit_mask = MHPMEVENTH_BIT_OF;
1620 } else {
1621 mhpm_evt_val = env->mhpmevent_val;
1622 of_bit_mask = MHPMEVENT_BIT_OF;
1623 }
1624
1625 for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) {
1626 if ((get_field(env->mcounteren, BIT(i))) &&
1627 (mhpm_evt_val[i] & of_bit_mask)) {
1628 *val |= BIT(i);
1629 }
1630 }
1631
1632 return RISCV_EXCP_NONE;
1633 }
1634
read_time(CPURISCVState * env,int csrno,target_ulong * val)1635 static RISCVException read_time(CPURISCVState *env, int csrno,
1636 target_ulong *val)
1637 {
1638 uint64_t delta = env->virt_enabled ? env->htimedelta : 0;
1639
1640 if (!env->rdtime_fn) {
1641 return RISCV_EXCP_ILLEGAL_INST;
1642 }
1643
1644 *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
1645 return RISCV_EXCP_NONE;
1646 }
1647
read_timeh(CPURISCVState * env,int csrno,target_ulong * val)1648 static RISCVException read_timeh(CPURISCVState *env, int csrno,
1649 target_ulong *val)
1650 {
1651 uint64_t delta = env->virt_enabled ? env->htimedelta : 0;
1652
1653 if (!env->rdtime_fn) {
1654 return RISCV_EXCP_ILLEGAL_INST;
1655 }
1656
1657 *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
1658 return RISCV_EXCP_NONE;
1659 }
1660
read_vstimecmp(CPURISCVState * env,int csrno,target_ulong * val)1661 static RISCVException read_vstimecmp(CPURISCVState *env, int csrno,
1662 target_ulong *val)
1663 {
1664 *val = env->vstimecmp;
1665
1666 return RISCV_EXCP_NONE;
1667 }
1668
read_vstimecmph(CPURISCVState * env,int csrno,target_ulong * val)1669 static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
1670 target_ulong *val)
1671 {
1672 *val = env->vstimecmp >> 32;
1673
1674 return RISCV_EXCP_NONE;
1675 }
1676
write_vstimecmp(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1677 static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
1678 target_ulong val, uintptr_t ra)
1679 {
1680 if (riscv_cpu_mxl(env) == MXL_RV32) {
1681 env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
1682 } else {
1683 env->vstimecmp = val;
1684 }
1685
1686 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
1687 env->htimedelta, MIP_VSTIP);
1688
1689 return RISCV_EXCP_NONE;
1690 }
1691
/* Write the upper 32 bits of vstimecmp (RV32) and re-arm the VS timer. */
static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
                                       target_ulong val, uintptr_t ra)
{
    env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
    riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
                              env->htimedelta, MIP_VSTIP);

    return RISCV_EXCP_NONE;
}
1701
read_stimecmp(CPURISCVState * env,int csrno,target_ulong * val)1702 static RISCVException read_stimecmp(CPURISCVState *env, int csrno,
1703 target_ulong *val)
1704 {
1705 if (env->virt_enabled) {
1706 *val = env->vstimecmp;
1707 } else {
1708 *val = env->stimecmp;
1709 }
1710
1711 return RISCV_EXCP_NONE;
1712 }
1713
read_stimecmph(CPURISCVState * env,int csrno,target_ulong * val)1714 static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
1715 target_ulong *val)
1716 {
1717 if (env->virt_enabled) {
1718 *val = env->vstimecmp >> 32;
1719 } else {
1720 *val = env->stimecmp >> 32;
1721 }
1722
1723 return RISCV_EXCP_NONE;
1724 }
1725
write_stimecmp(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1726 static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
1727 target_ulong val, uintptr_t ra)
1728 {
1729 if (env->virt_enabled) {
1730 if (env->hvictl & HVICTL_VTI) {
1731 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1732 }
1733 return write_vstimecmp(env, csrno, val, ra);
1734 }
1735
1736 if (riscv_cpu_mxl(env) == MXL_RV32) {
1737 env->stimecmp = deposit64(env->stimecmp, 0, 32, (uint64_t)val);
1738 } else {
1739 env->stimecmp = val;
1740 }
1741
1742 riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);
1743
1744 return RISCV_EXCP_NONE;
1745 }
1746
write_stimecmph(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1747 static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
1748 target_ulong val, uintptr_t ra)
1749 {
1750 if (env->virt_enabled) {
1751 if (env->hvictl & HVICTL_VTI) {
1752 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1753 }
1754 return write_vstimecmph(env, csrno, val, ra);
1755 }
1756
1757 env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
1758 riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);
1759
1760 return RISCV_EXCP_NONE;
1761 }
1762
#define VSTOPI_NUM_SRCS 5

/*
 * All core local interrupts except the fixed ones 0:12. This macro is for
 * virtual interrupts logic so please don't change it to avoid breaking the
 * whole support. For reference see AIA spec: `5.3 Interrupt filtering and
 * virtual interrupts for supervisor level` and `6.3.2 Virtual interrupts for
 * VS level`.
 */
#define LOCAL_INTERRUPTS (~0x1FFFULL)

/* Interrupts M-mode may delegate to S-mode (mideleg). */
static const uint64_t delegable_ints =
    S_MODE_INTERRUPTS | VS_MODE_INTERRUPTS | MIP_LCOFIP;
/* Interrupts HS-mode may delegate to VS-mode (hideleg). */
static const uint64_t vs_delegable_ints =
    (VS_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & ~MIP_LCOFIP;
static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
                                 HS_MODE_INTERRUPTS | LOCAL_INTERRUPTS;
/* Exceptions M-mode may delegate to S-mode (medeleg). */
#define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
                         (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_U_ECALL)) | \
                         (1ULL << (RISCV_EXCP_S_ECALL)) | \
                         (1ULL << (RISCV_EXCP_VS_ECALL)) | \
                         (1ULL << (RISCV_EXCP_M_ECALL)) | \
                         (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_SW_CHECK)) | \
                         (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
/*
 * Exceptions HS-mode may delegate to VS-mode (hedeleg): everything except
 * ecalls from S/VS/M and hypervisor-level faults.
 */
static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
    ~((1ULL << (RISCV_EXCP_S_ECALL)) |
      (1ULL << (RISCV_EXCP_VS_ECALL)) |
      (1ULL << (RISCV_EXCP_M_ECALL)) |
      (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
      (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
      (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
      (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
/* mstatus fields visible through the sstatus view. */
static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
    SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
    SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;

/*
 * Spec allows for bits 13:63 to be either read-only or writable.
 * So far we have interrupt LCOFIP in that region which is writable.
 *
 * Also, spec allows to inject virtual interrupts in this region even
 * without any hardware interrupts for that interrupt number.
 *
 * For now interrupt in 13:63 region are all kept writable. 13 being
 * LCOFIP and 14:63 being virtual only. Change this in future if we
 * introduce more interrupts that are not writable.
 */

/* Bit STIP can be an alias of mip.STIP that's why it's writable in mvip. */
static const uint64_t mvip_writable_mask = MIP_SSIP | MIP_STIP | MIP_SEIP |
                                           LOCAL_INTERRUPTS;
static const uint64_t mvien_writable_mask = MIP_SSIP | MIP_SEIP |
                                            LOCAL_INTERRUPTS;

static const uint64_t sip_writable_mask = SIP_SSIP | LOCAL_INTERRUPTS;
static const uint64_t hip_writable_mask = MIP_VSSIP;
static const uint64_t hvip_writable_mask = MIP_VSSIP | MIP_VSTIP |
                                           MIP_VSEIP | LOCAL_INTERRUPTS;
static const uint64_t hvien_writable_mask = LOCAL_INTERRUPTS;

static const uint64_t vsip_writable_mask = MIP_VSSIP | LOCAL_INTERRUPTS;

/* Legal satp.MODE encodings, indexed by mode value. */
const bool valid_vm_1_10_32[16] = {
    [VM_1_10_MBARE] = true,
    [VM_1_10_SV32] = true
};

const bool valid_vm_1_10_64[16] = {
    [VM_1_10_MBARE] = true,
    [VM_1_10_SV39] = true,
    [VM_1_10_SV48] = true,
    [VM_1_10_SV57] = true
};
1849
1850 /* Machine Information Registers */
/* Generic read handler for CSRs hard-wired to zero. */
static RISCVException read_zero(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = 0;
    return RISCV_EXCP_NONE;
}
1857
/* Generic write handler for CSRs whose writes are silently discarded. */
static RISCVException write_ignore(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    return RISCV_EXCP_NONE;
}
1863
/* mvendorid is fixed per CPU model via the configuration. */
static RISCVException read_mvendorid(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->mvendorid;
    return RISCV_EXCP_NONE;
}
1870
/* marchid is fixed per CPU model via the configuration. */
static RISCVException read_marchid(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->marchid;
    return RISCV_EXCP_NONE;
}
1877
/* mimpid is fixed per CPU model via the configuration. */
static RISCVException read_mimpid(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->mimpid;
    return RISCV_EXCP_NONE;
}
1884
read_mhartid(CPURISCVState * env,int csrno,target_ulong * val)1885 static RISCVException read_mhartid(CPURISCVState *env, int csrno,
1886 target_ulong *val)
1887 {
1888 *val = env->mhartid;
1889 return RISCV_EXCP_NONE;
1890 }
1891
1892 /* Machine Trap Setup */
1893
1894 /* We do not store SD explicitly, only compute it on demand. */
/*
 * Derive the read-only SD summary bit from FS/VS/XS and fold it into the
 * status value for the given XLEN's bit position.
 *
 * NOTE(review): for MXL_RV128 this returns only MSTATUSH128_SD, discarding
 * @status -- callers pass it as the *upper* 64-bit word of the 128-bit
 * mstatus view, but when no unit is dirty the untouched lower word is
 * returned instead; confirm this is intended (see read_mstatus_i128).
 */
static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
{
    /* "== MASK" checks for the Dirty encoding (both bits set). */
    if ((status & MSTATUS_FS) == MSTATUS_FS ||
        (status & MSTATUS_VS) == MSTATUS_VS ||
        (status & MSTATUS_XS) == MSTATUS_XS) {
        switch (xl) {
        case MXL_RV32:
            return status | MSTATUS32_SD;
        case MXL_RV64:
            return status | MSTATUS64_SD;
        case MXL_RV128:
            return MSTATUSH128_SD;
        default:
            g_assert_not_reached();
        }
    }
    return status;
}
1913
/* Read mstatus with SD recomputed from FS/VS/XS on the fly. */
static RISCVException read_mstatus(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
    return RISCV_EXCP_NONE;
}
1920
validate_vm(CPURISCVState * env,target_ulong vm)1921 static bool validate_vm(CPURISCVState *env, target_ulong vm)
1922 {
1923 bool rv32 = riscv_cpu_mxl(env) == MXL_RV32;
1924 RISCVCPU *cpu = env_archcpu(env);
1925 int satp_mode_supported_max = cpu->cfg.max_satp_mode;
1926 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
1927
1928 assert(satp_mode_supported_max >= 0);
1929 return vm <= satp_mode_supported_max && valid_vm[vm];
1930 }
1931
/*
 * Legalize a write to satp (or a similar xatp CSR): accept @val only if
 * the requested mode is valid, otherwise keep @old_xatp. A TLB flush is
 * performed whenever an accepted write actually changes any field.
 */
static target_ulong legalize_xatp(CPURISCVState *env, target_ulong old_xatp,
                                  target_ulong val)
{
    target_ulong mask;
    bool vm;
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        vm = validate_vm(env, get_field(val, SATP32_MODE));
        mask = (val ^ old_xatp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
    } else {
        vm = validate_vm(env, get_field(val, SATP64_MODE));
        mask = (val ^ old_xatp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
    }

    if (vm && mask) {
        /*
         * The ISA defines SATP.MODE=Bare as "no translation", but we still
         * pass these through QEMU's TLB emulation as it improves
         * performance. Flushing the TLB on SATP writes with paging
         * enabled avoids leaking those invalid cached mappings.
         */
        tlb_flush(env_cpu(env));
        return val;
    }
    /* Invalid mode or no effective change: the CSR keeps its old value. */
    return old_xatp;
}
1957
static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp,
                                 target_ulong val)
{
    target_ulong new_mpp = get_field(val, MSTATUS_MPP);
    bool valid;

    /* M is always implemented; S and U depend on the enabled extensions. */
    switch (new_mpp) {
    case PRV_M:
        valid = true;
        break;
    case PRV_S:
        valid = riscv_has_ext(env, RVS);
        break;
    case PRV_U:
        valid = riscv_has_ext(env, RVU);
        break;
    default:
        valid = false;
        break;
    }

    /* WARL: keep the previous MPP when the written encoding is invalid. */
    return valid ? val : set_field(val, MSTATUS_MPP, old_mpp);
}
1983
/*
 * Write mstatus: legalize MPP, build the writable-bit mask from the
 * enabled extensions, flush the TLB when translation-affecting bits
 * change, and apply the masked update.
 */
static RISCVException write_mstatus(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    uint64_t mstatus = env->mstatus;
    uint64_t mask = 0;
    RISCVMXL xl = riscv_cpu_mxl(env);

    /*
     * The MPP field has been WARL since priv version 1.11. However,
     * legalization for it will not break any software running on 1.10.
     */
    val = legalize_mpp(env, get_field(mstatus, MSTATUS_MPP), val);

    /* flush tlb on mstatus fields that affect VM */
    if ((val ^ mstatus) & MSTATUS_MXR) {
        tlb_flush(env_cpu(env));
    }
    /* Base set of writable bits, present on every configuration. */
    mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
        MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
        MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
        MSTATUS_TW;

    if (riscv_has_ext(env, RVF)) {
        mask |= MSTATUS_FS;
    }

    if (riscv_cpu_cfg(env)->ext_zve32x) {
        mask |= MSTATUS_VS;
    }

    if (riscv_env_smode_dbltrp_enabled(env, env->virt_enabled)) {
        mask |= MSTATUS_SDT;
        /* Setting SDT (supervisor double trap) forces SIE off. */
        if ((val & MSTATUS_SDT) != 0) {
            val &= ~MSTATUS_SIE;
        }
    }

    if (riscv_cpu_cfg(env)->ext_smdbltrp) {
        mask |= MSTATUS_MDT;
        /* Setting MDT (machine double trap) forces MIE off. */
        if ((val & MSTATUS_MDT) != 0) {
            val &= ~MSTATUS_MIE;
        }
    }

    if (xl != MXL_RV32 || env->debugger) {
        if (riscv_has_ext(env, RVH)) {
            mask |= MSTATUS_MPV | MSTATUS_GVA;
        }
        /* UXL is only writable to a non-zero (legal) encoding. */
        if ((val & MSTATUS64_UXL) != 0) {
            mask |= MSTATUS64_UXL;
        }
    }

    /* If cfi lp extension is available, then apply cfi lp mask */
    if (env_archcpu(env)->cfg.ext_zicfilp) {
        mask |= (MSTATUS_MPELP | MSTATUS_SPELP);
    }

    mstatus = (mstatus & ~mask) | (val & mask);

    env->mstatus = mstatus;

    /*
     * Except in debug mode, UXL/SXL can only be modified by higher
     * privilege mode. So xl will not be changed in normal mode.
     */
    if (env->debugger) {
        env->xl = cpu_recompute_xl(env);
    }

    return RISCV_EXCP_NONE;
}
2056
read_mstatush(CPURISCVState * env,int csrno,target_ulong * val)2057 static RISCVException read_mstatush(CPURISCVState *env, int csrno,
2058 target_ulong *val)
2059 {
2060 *val = env->mstatus >> 32;
2061 return RISCV_EXCP_NONE;
2062 }
2063
static RISCVException write_mstatush(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    /* Incoming value holds mstatus bits 63:32; shift it into position. */
    uint64_t valh = (uint64_t)val << 32;
    uint64_t mask = riscv_has_ext(env, RVH) ? MSTATUS_MPV | MSTATUS_GVA : 0;

    if (riscv_cpu_cfg(env)->ext_smdbltrp) {
        mask |= MSTATUS_MDT;
        if ((valh & MSTATUS_MDT) != 0) {
            /*
             * Setting MDT must also clear MIE.  MSTATUS_MIE lives in the
             * low 32 bits, which are all zero in valh, so adding it to the
             * write mask forces the bit to 0 in the update below.
             */
            mask |= MSTATUS_MIE;
        }
    }
    env->mstatus = (env->mstatus & ~mask) | (valh & mask);

    return RISCV_EXCP_NONE;
}
2080
read_mstatus_i128(CPURISCVState * env,int csrno,Int128 * val)2081 static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
2082 Int128 *val)
2083 {
2084 *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128,
2085 env->mstatus));
2086 return RISCV_EXCP_NONE;
2087 }
2088
read_misa_i128(CPURISCVState * env,int csrno,Int128 * val)2089 static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
2090 Int128 *val)
2091 {
2092 *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
2093 return RISCV_EXCP_NONE;
2094 }
2095
read_misa(CPURISCVState * env,int csrno,target_ulong * val)2096 static RISCVException read_misa(CPURISCVState *env, int csrno,
2097 target_ulong *val)
2098 {
2099 target_ulong misa;
2100
2101 switch (env->misa_mxl) {
2102 case MXL_RV32:
2103 misa = (target_ulong)MXL_RV32 << 30;
2104 break;
2105 #ifdef TARGET_RISCV64
2106 case MXL_RV64:
2107 misa = (target_ulong)MXL_RV64 << 62;
2108 break;
2109 #endif
2110 default:
2111 g_assert_not_reached();
2112 }
2113
2114 *val = misa | env->misa_ext;
2115 return RISCV_EXCP_NONE;
2116 }
2117
/*
 * Return the address of the next instruction to execute.  'ra' is the
 * host return address used to unwind TCG execution state; when it is
 * zero, or unwinding fails, env->pc already holds the next pc.
 */
static target_ulong get_next_pc(CPURISCVState *env, uintptr_t ra)
{
    uint64_t data[INSN_START_WORDS];

    /* Outside of a running cpu, env contains the next pc. */
    if (ra == 0 || !cpu_unwind_state_data(env_cpu(env), ra, data)) {
        return env->pc;
    }

    /* Within unwind data, [0] is pc and [1] is the opcode. */
    return data[0] + insn_len(data[1]);
}
2130
/*
 * Write misa: masks unsupported extensions, validates the new combination,
 * and rolls back on validation failure.  Writes are dropped entirely when
 * the misa_w config knob is off.
 */
static RISCVException write_misa(CPURISCVState *env, int csrno,
                                 target_ulong val, uintptr_t ra)
{
    RISCVCPU *cpu = env_archcpu(env);
    uint32_t orig_misa_ext = env->misa_ext;
    Error *local_err = NULL;

    if (!riscv_cpu_cfg(env)->misa_w) {
        /* drop write to misa */
        return RISCV_EXCP_NONE;
    }

    /* Mask extensions that are not supported by this hart */
    val &= env->misa_ext_mask;

    /*
     * Suppress 'C' if the next instruction is not 4-byte aligned:
     * disabling compressed instructions while the pending pc is only
     * 2-byte aligned would leave an unfetchable pc.
     */
    if ((val & RVC) && (get_next_pc(env, ra) & 3) != 0) {
        val &= ~RVC;
    }

    /* Disable RVG if any of its dependencies are disabled */
    if (!(val & RVI && val & RVM && val & RVA &&
          val & RVF && val & RVD)) {
        val &= ~RVG;
    }

    /* If nothing changed, do nothing. */
    if (val == env->misa_ext) {
        return RISCV_EXCP_NONE;
    }

    env->misa_ext = val;
    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        /* Rollback on validation error */
        qemu_log_mask(LOG_GUEST_ERROR, "Unable to write MISA ext value "
                      "0x%x, keeping existing MISA ext 0x%x\n",
                      env->misa_ext, orig_misa_ext);

        env->misa_ext = orig_misa_ext;

        return RISCV_EXCP_NONE;
    }

    /* Clear mstatus.FS when F support is removed. */
    if (!(env->misa_ext & RVF)) {
        env->mstatus &= ~MSTATUS_FS;
    }

    /* flush translation cache */
    tb_flush(env_cpu(env));
    env->xl = riscv_cpu_mxl(env);
    return RISCV_EXCP_NONE;
}
2184
read_medeleg(CPURISCVState * env,int csrno,target_ulong * val)2185 static RISCVException read_medeleg(CPURISCVState *env, int csrno,
2186 target_ulong *val)
2187 {
2188 *val = env->medeleg;
2189 return RISCV_EXCP_NONE;
2190 }
2191
write_medeleg(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)2192 static RISCVException write_medeleg(CPURISCVState *env, int csrno,
2193 target_ulong val, uintptr_t ra)
2194 {
2195 env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
2196 return RISCV_EXCP_NONE;
2197 }
2198
rmw_mideleg64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)2199 static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno,
2200 uint64_t *ret_val,
2201 uint64_t new_val, uint64_t wr_mask)
2202 {
2203 uint64_t mask = wr_mask & delegable_ints;
2204
2205 if (ret_val) {
2206 *ret_val = env->mideleg;
2207 }
2208
2209 env->mideleg = (env->mideleg & ~mask) | (new_val & mask);
2210
2211 if (riscv_has_ext(env, RVH)) {
2212 env->mideleg |= HS_MODE_INTERRUPTS;
2213 }
2214
2215 return RISCV_EXCP_NONE;
2216 }
2217
rmw_mideleg(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2218 static RISCVException rmw_mideleg(CPURISCVState *env, int csrno,
2219 target_ulong *ret_val,
2220 target_ulong new_val, target_ulong wr_mask)
2221 {
2222 uint64_t rval;
2223 RISCVException ret;
2224
2225 ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask);
2226 if (ret_val) {
2227 *ret_val = rval;
2228 }
2229
2230 return ret;
2231 }
2232
rmw_midelegh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2233 static RISCVException rmw_midelegh(CPURISCVState *env, int csrno,
2234 target_ulong *ret_val,
2235 target_ulong new_val,
2236 target_ulong wr_mask)
2237 {
2238 uint64_t rval;
2239 RISCVException ret;
2240
2241 ret = rmw_mideleg64(env, csrno, &rval,
2242 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2243 if (ret_val) {
2244 *ret_val = rval >> 32;
2245 }
2246
2247 return ret;
2248 }
2249
rmw_mie64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)2250 static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
2251 uint64_t *ret_val,
2252 uint64_t new_val, uint64_t wr_mask)
2253 {
2254 uint64_t mask = wr_mask & all_ints;
2255
2256 if (ret_val) {
2257 *ret_val = env->mie;
2258 }
2259
2260 env->mie = (env->mie & ~mask) | (new_val & mask);
2261
2262 if (!riscv_has_ext(env, RVH)) {
2263 env->mie &= ~((uint64_t)HS_MODE_INTERRUPTS);
2264 }
2265
2266 return RISCV_EXCP_NONE;
2267 }
2268
rmw_mie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2269 static RISCVException rmw_mie(CPURISCVState *env, int csrno,
2270 target_ulong *ret_val,
2271 target_ulong new_val, target_ulong wr_mask)
2272 {
2273 uint64_t rval;
2274 RISCVException ret;
2275
2276 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask);
2277 if (ret_val) {
2278 *ret_val = rval;
2279 }
2280
2281 return ret;
2282 }
2283
rmw_mieh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2284 static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
2285 target_ulong *ret_val,
2286 target_ulong new_val, target_ulong wr_mask)
2287 {
2288 uint64_t rval;
2289 RISCVException ret;
2290
2291 ret = rmw_mie64(env, csrno, &rval,
2292 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2293 if (ret_val) {
2294 *ret_val = rval >> 32;
2295 }
2296
2297 return ret;
2298 }
2299
rmw_mvien64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)2300 static RISCVException rmw_mvien64(CPURISCVState *env, int csrno,
2301 uint64_t *ret_val,
2302 uint64_t new_val, uint64_t wr_mask)
2303 {
2304 uint64_t mask = wr_mask & mvien_writable_mask;
2305
2306 if (ret_val) {
2307 *ret_val = env->mvien;
2308 }
2309
2310 env->mvien = (env->mvien & ~mask) | (new_val & mask);
2311
2312 return RISCV_EXCP_NONE;
2313 }
2314
rmw_mvien(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2315 static RISCVException rmw_mvien(CPURISCVState *env, int csrno,
2316 target_ulong *ret_val,
2317 target_ulong new_val, target_ulong wr_mask)
2318 {
2319 uint64_t rval;
2320 RISCVException ret;
2321
2322 ret = rmw_mvien64(env, csrno, &rval, new_val, wr_mask);
2323 if (ret_val) {
2324 *ret_val = rval;
2325 }
2326
2327 return ret;
2328 }
2329
rmw_mvienh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2330 static RISCVException rmw_mvienh(CPURISCVState *env, int csrno,
2331 target_ulong *ret_val,
2332 target_ulong new_val, target_ulong wr_mask)
2333 {
2334 uint64_t rval;
2335 RISCVException ret;
2336
2337 ret = rmw_mvien64(env, csrno, &rval,
2338 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2339 if (ret_val) {
2340 *ret_val = rval >> 32;
2341 }
2342
2343 return ret;
2344 }
2345
read_mtopi(CPURISCVState * env,int csrno,target_ulong * val)2346 static RISCVException read_mtopi(CPURISCVState *env, int csrno,
2347 target_ulong *val)
2348 {
2349 int irq;
2350 uint8_t iprio;
2351
2352 irq = riscv_cpu_mirq_pending(env);
2353 if (irq <= 0 || irq > 63) {
2354 *val = 0;
2355 } else {
2356 iprio = env->miprio[irq];
2357 if (!iprio) {
2358 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) {
2359 iprio = IPRIO_MMAXIPRIO;
2360 }
2361 }
2362 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
2363 *val |= iprio;
2364 }
2365
2366 return RISCV_EXCP_NONE;
2367 }
2368
static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
{
    /* Only remap S-mode AIA CSRs when running virtualized (VS-mode). */
    if (env->virt_enabled) {
        if (csrno == CSR_SISELECT) {
            return CSR_VSISELECT;
        }
        if (csrno == CSR_SIREG) {
            return CSR_VSIREG;
        }
        if (csrno == CSR_STOPEI) {
            return CSR_VSTOPEI;
        }
    }
    return csrno;
}
2386
static int csrind_xlate_vs_csrno(CPURISCVState *env, int csrno)
{
    if (!env->virt_enabled) {
        return csrno;
    }

    /*
     * In VS-mode, the S-mode indirect-access CSRs alias the matching
     * VS-mode register (the sireg/vsireg numbering gaps line up).
     */
    switch (csrno) {
    case CSR_SISELECT:
        return CSR_VSISELECT;
    case CSR_SIREG:
    case CSR_SIREG2:
    case CSR_SIREG3:
    case CSR_SIREG4:
    case CSR_SIREG5:
    case CSR_SIREG6:
        return CSR_VSIREG + (csrno - CSR_SIREG);
    }

    return csrno;
}
2407
/*
 * Read-modify-write the xiselect CSR (miselect/siselect/vsiselect),
 * which picks the target of subsequent xireg indirect accesses.
 */
static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
                                   target_ulong *val, target_ulong new_val,
                                   target_ulong wr_mask)
{
    target_ulong *iselect;
    int ret;

    /* Access is gated by the mstateen0 SVSLCT bit. */
    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* Translate CSR number for VS-mode */
    csrno = csrind_xlate_vs_csrno(env, csrno);

    /* Find the iselect CSR based on CSR number */
    switch (csrno) {
    case CSR_MISELECT:
        iselect = &env->miselect;
        break;
    case CSR_SISELECT:
        iselect = &env->siselect;
        break;
    case CSR_VSISELECT:
        iselect = &env->vsiselect;
        break;
    default:
        return RISCV_EXCP_ILLEGAL_INST;
    };

    if (val) {
        *val = *iselect;
    }

    /* Smcsrind/Sscsrind widens the writable select space beyond AIA's. */
    if (riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind) {
        wr_mask &= ISELECT_MASK_SXCSRIND;
    } else {
        wr_mask &= ISELECT_MASK_AIA;
    }

    if (wr_mask) {
        *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
    }

    return RISCV_EXCP_NONE;
}
2454
xiselect_aia_range(target_ulong isel)2455 static bool xiselect_aia_range(target_ulong isel)
2456 {
2457 return (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) ||
2458 (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST);
2459 }
2460
xiselect_cd_range(target_ulong isel)2461 static bool xiselect_cd_range(target_ulong isel)
2462 {
2463 return (ISELECT_CD_FIRST <= isel && isel <= ISELECT_CD_LAST);
2464 }
2465
xiselect_ctr_range(int csrno,target_ulong isel)2466 static bool xiselect_ctr_range(int csrno, target_ulong isel)
2467 {
2468 /* MIREG-MIREG6 for the range 0x200-0x2ff are not used by CTR. */
2469 return CTR_ENTRIES_FIRST <= isel && isel <= CTR_ENTRIES_LAST &&
2470 csrno < CSR_MIREG;
2471 }
2472
/*
 * Read-modify-write one AIA interrupt-priority array register.  Each
 * xlen-wide register packs IPRIO_IRQ_BITS-wide priority bytes for
 * consecutive IRQs; 'ext_irq_no' names the external IRQ whose priority
 * byte is read-only zero.  Returns 0 on success, -EINVAL for a select
 * outside the iprio window.
 */
static int rmw_iprio(target_ulong xlen,
                     target_ulong iselect, uint8_t *iprio,
                     target_ulong *val, target_ulong new_val,
                     target_ulong wr_mask, int ext_irq_no)
{
    int i, firq, nirqs;
    target_ulong old_val;

    if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) {
        return -EINVAL;
    }
    /* On RV64 only the even-numbered iprio registers exist. */
    if (xlen != 32 && iselect & 0x1) {
        return -EINVAL;
    }

    /* 4 IRQs per 32-bit register, 8 per 64-bit register. */
    nirqs = 4 * (xlen / 32);
    /* First IRQ number covered by the selected register. */
    firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs);

    /* Assemble the current value from the per-IRQ priority bytes. */
    old_val = 0;
    for (i = 0; i < nirqs; i++) {
        old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i);
    }

    if (val) {
        *val = old_val;
    }

    if (wr_mask) {
        new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
        for (i = 0; i < nirqs; i++) {
            /*
             * M-level and S-level external IRQ priority always read-only
             * zero. This means default priority order is always preferred
             * for M-level and S-level external IRQs.
             */
            if ((firq + i) == ext_irq_no) {
                continue;
            }
            iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff;
        }
    }

    return 0;
}
2517
static int rmw_ctrsource(CPURISCVState *env, int isel, target_ulong *val,
                         target_ulong new_val, target_ulong wr_mask)
{
    /*
     * The CTR array is a circular buffer: the write pointer (TOS) names
     * the next free slot, so logical entry 0 is the slot just behind it,
     * entry 1 the one behind that, and so on, wrapping at 'depth'.
     */
    const uint64_t entry = isel - CTR_ENTRIES_FIRST;
    const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
    uint64_t slot;

    /* Entries beyond the configured depth read as zero, writes ignored. */
    if (entry >= depth) {
        if (val) {
            *val = 0;
        }
        return 0;
    }

    /* Map the logical entry onto a physical slot behind the write pointer. */
    slot = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
    slot = (slot - entry - 1) & (depth - 1);

    if (val) {
        *val = env->ctr_src[slot];
    }

    env->ctr_src[slot] = (env->ctr_src[slot] & ~wr_mask) |
                         (new_val & wr_mask);

    return 0;
}
2556
static int rmw_ctrtarget(CPURISCVState *env, int isel, target_ulong *val,
                         target_ulong new_val, target_ulong wr_mask)
{
    /*
     * Same circular-buffer layout as rmw_ctrsource: logical entry 0 is
     * the slot immediately behind the write pointer, wrapping at 'depth'.
     */
    const uint64_t entry = isel - CTR_ENTRIES_FIRST;
    const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
    uint64_t slot;

    /* Entries beyond the configured depth read as zero, writes ignored. */
    if (entry >= depth) {
        if (val) {
            *val = 0;
        }
        return 0;
    }

    slot = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
    slot = (slot - entry - 1) & (depth - 1);

    if (val) {
        *val = env->ctr_dst[slot];
    }

    env->ctr_dst[slot] = (env->ctr_dst[slot] & ~wr_mask) |
                         (new_val & wr_mask);

    return 0;
}
2595
static int rmw_ctrdata(CPURISCVState *env, int isel, target_ulong *val,
                       target_ulong new_val, target_ulong wr_mask)
{
    /*
     * Same circular-buffer layout as rmw_ctrsource, but writes are
     * additionally restricted to the bits in CTRDATA_MASK.
     */
    const uint64_t entry = isel - CTR_ENTRIES_FIRST;
    const uint64_t write_bits = wr_mask & CTRDATA_MASK;
    const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
    uint64_t slot;

    /* Entries beyond the configured depth read as zero, writes ignored. */
    if (entry >= depth) {
        if (val) {
            *val = 0;
        }
        return 0;
    }

    slot = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
    slot = (slot - entry - 1) & (depth - 1);

    if (val) {
        *val = env->ctr_data[slot];
    }

    env->ctr_data[slot] = (env->ctr_data[slot] & ~write_bits) |
                          (new_val & write_bits);

    return 0;
}
2635
/*
 * Handle xireg indirect accesses that fall in the AIA select windows
 * (interrupt priorities and IMSIC registers).
 */
static RISCVException rmw_xireg_aia(CPURISCVState *env, int csrno,
                                    target_ulong isel, target_ulong *val,
                                    target_ulong new_val, target_ulong wr_mask)
{
    bool virt = false, isel_reserved = false;
    int ret = -EINVAL;
    uint8_t *iprio;
    target_ulong priv, vgein;

    /* VS-mode CSR number passed in has already been translated */
    switch (csrno) {
    case CSR_MIREG:
        if (!riscv_cpu_cfg(env)->ext_smaia) {
            goto done;
        }
        iprio = env->miprio;
        priv = PRV_M;
        break;
    case CSR_SIREG:
        /*
         * S-mode access is blocked when a virtual SEIP is injected via
         * mvien and the select targets the IMSIC delivery/enable window.
         */
        if (!riscv_cpu_cfg(env)->ext_ssaia ||
            (env->priv == PRV_S && env->mvien & MIP_SEIP &&
             env->siselect >= ISELECT_IMSIC_EIDELIVERY &&
             env->siselect <= ISELECT_IMSIC_EIE63)) {
            goto done;
        }
        iprio = env->siprio;
        priv = PRV_S;
        break;
    case CSR_VSIREG:
        if (!riscv_cpu_cfg(env)->ext_ssaia) {
            goto done;
        }
        iprio = env->hviprio;
        priv = PRV_S;
        virt = true;
        break;
    default:
        goto done;
    };

    /* Find the selected guest interrupt file */
    vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;

    if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) {
        /* Local interrupt priority registers not available for VS-mode */
        if (!virt) {
            ret = rmw_iprio(riscv_cpu_mxl_bits(env),
                            isel, iprio, val, new_val, wr_mask,
                            (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT);
        }
    } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) {
        /* IMSIC registers only available when machine implements it. */
        if (env->aia_ireg_rmw_fn[priv]) {
            /* Selected guest interrupt file should not be zero */
            if (virt && (!vgein || env->geilen < vgein)) {
                goto done;
            }
            /* Call machine specific IMSIC register emulation */
            ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
                                             AIA_MAKE_IREG(isel, priv, virt,
                                                           vgein,
                                                           riscv_cpu_mxl_bits(env)),
                                             val, new_val, wr_mask);
        }
    } else {
        isel_reserved = true;
    }

done:
    /*
     * If AIA is not enabled, illegal instruction exception is always
     * returned regardless of whether we are in VS-mode or not
     */
    if (ret) {
        return (env->virt_enabled && virt && !isel_reserved) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}
2715
/*
 * Handle xireg indirect accesses in the counter-delegation (Smcdeleg /
 * Ssccfg) select window.  Returns 0 on success, -EINVAL or
 * RISCV_EXCP_ILLEGAL_INST on failure (the caller maps both to a trap).
 */
static int rmw_xireg_cd(CPURISCVState *env, int csrno,
                        target_ulong isel, target_ulong *val,
                        target_ulong new_val, target_ulong wr_mask)
{
    int ret = -EINVAL;
    int ctr_index = isel - ISELECT_CD_FIRST;
    int isel_hpm_start = ISELECT_CD_FIRST + 3;

    /* Counter delegation requires both Smcdeleg and Ssccfg. */
    if (!riscv_cpu_cfg(env)->ext_smcdeleg || !riscv_cpu_cfg(env)->ext_ssccfg) {
        ret = RISCV_EXCP_ILLEGAL_INST;
        goto done;
    }

    /* Invalid siselect value for reserved */
    if (ctr_index == 1) {
        goto done;
    }

    /* sireg4 and sireg5 provide access to RV32-only CSRs */
    if (((csrno == CSR_SIREG5) || (csrno == CSR_SIREG4)) &&
        (riscv_cpu_mxl(env) != MXL_RV32)) {
        ret = RISCV_EXCP_ILLEGAL_INST;
        goto done;
    }

    /* Check Sscofpmf dependency */
    if (!riscv_cpu_cfg(env)->ext_sscofpmf && csrno == CSR_SIREG5 &&
        (isel_hpm_start <= isel && isel <= ISELECT_CD_LAST)) {
        goto done;
    }

    /* Check Smcntrpmf dependency */
    if (!riscv_cpu_cfg(env)->ext_smcntrpmf &&
        (csrno == CSR_SIREG2 || csrno == CSR_SIREG5) &&
        (ISELECT_CD_FIRST <= isel && isel < isel_hpm_start)) {
        goto done;
    }

    /* The counter must be enabled in mcounteren and delegated via CDE. */
    if (!get_field(env->mcounteren, BIT(ctr_index)) ||
        !get_field(env->menvcfg, MENVCFG_CDE)) {
        goto done;
    }

    switch (csrno) {
    case CSR_SIREG:
        ret = rmw_cd_mhpmcounter(env, ctr_index, val, new_val, wr_mask);
        break;
    case CSR_SIREG4:
        ret = rmw_cd_mhpmcounterh(env, ctr_index, val, new_val, wr_mask);
        break;
    case CSR_SIREG2:
        /* ctr_index <= 2 selects the fixed-counter config registers. */
        if (ctr_index <= 2) {
            ret = rmw_cd_ctr_cfg(env, ctr_index, val, new_val, wr_mask);
        } else {
            ret = rmw_cd_mhpmevent(env, ctr_index, val, new_val, wr_mask);
        }
        break;
    case CSR_SIREG5:
        if (ctr_index <= 2) {
            ret = rmw_cd_ctr_cfgh(env, ctr_index, val, new_val, wr_mask);
        } else {
            ret = rmw_cd_mhpmeventh(env, ctr_index, val, new_val, wr_mask);
        }
        break;
    default:
        goto done;
    }

done:
    return ret;
}
2787
static int rmw_xireg_ctr(CPURISCVState *env, int csrno,
                         target_ulong isel, target_ulong *val,
                         target_ulong new_val, target_ulong wr_mask)
{
    /* CTR requires Smctr or Ssctr to be present. */
    if (!riscv_cpu_cfg(env)->ext_smctr && !riscv_cpu_cfg(env)->ext_ssctr) {
        return -EINVAL;
    }

    /* Dispatch on the xireg alias: source, target or data array. */
    switch (csrno) {
    case CSR_SIREG:
    case CSR_VSIREG:
        return rmw_ctrsource(env, isel, val, new_val, wr_mask);
    case CSR_SIREG2:
    case CSR_VSIREG2:
        return rmw_ctrtarget(env, isel, val, new_val, wr_mask);
    case CSR_SIREG3:
    case CSR_VSIREG3:
        return rmw_ctrdata(env, isel, val, new_val, wr_mask);
    default:
        /* Remaining aliases read as zero within the CTR range. */
        if (val) {
            *val = 0;
        }
        return 0;
    }
}
2808
/*
 * rmw_xireg_csrind: Perform indirect access to xireg and xireg2-xireg6
 *
 * Perform indirect access to xireg and xireg2-xireg6.
 * This is a generic interface for all xireg CSRs. Apart from AIA, all other
 * extensions using csrind should be implemented here.
 */
static int rmw_xireg_csrind(CPURISCVState *env, int csrno,
                            target_ulong isel, target_ulong *val,
                            target_ulong new_val, target_ulong wr_mask)
{
    bool virt = csrno == CSR_VSIREG ? true : false;
    int ret = -EINVAL;

    if (xiselect_cd_range(isel)) {
        ret = rmw_xireg_cd(env, csrno, isel, val, new_val, wr_mask);
    } else if (xiselect_ctr_range(csrno, isel)) {
        ret = rmw_xireg_ctr(env, csrno, isel, val, new_val, wr_mask);
    } else {
        /*
         * As per the specification, access to an unimplemented region is
         * undefined but the recommendation is to raise an illegal
         * instruction exception.
         */
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* Map helper failure onto the exception matching the current mode. */
    if (ret) {
        return (env->virt_enabled && virt) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}
2842
/*
 * Common entry for the xireg2-xireg6 alias CSRs: pick the select register
 * that matches the alias' privilege level, then hand off to the generic
 * csrind handler.
 */
static int rmw_xiregi(CPURISCVState *env, int csrno, target_ulong *val,
                      target_ulong new_val, target_ulong wr_mask)
{
    int ret = -EINVAL;
    target_ulong isel;

    /* Access is gated by the mstateen0 SVSLCT bit. */
    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* Translate CSR number for VS-mode */
    csrno = csrind_xlate_vs_csrno(env, csrno);

    /*
     * NOTE(review): the "csrno != CSR_xIREG4 - 1" tests exclude the CSR
     * number immediately below each xIREG4, which appears to be an
     * unallocated hole in the xireg numbering — confirm against the CSR
     * address definitions.
     */
    if (CSR_MIREG <= csrno && csrno <= CSR_MIREG6 &&
        csrno != CSR_MIREG4 - 1) {
        isel = env->miselect;
    } else if (CSR_SIREG <= csrno && csrno <= CSR_SIREG6 &&
               csrno != CSR_SIREG4 - 1) {
        isel = env->siselect;
    } else if (CSR_VSIREG <= csrno && csrno <= CSR_VSIREG6 &&
               csrno != CSR_VSIREG4 - 1) {
        isel = env->vsiselect;
    } else {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
}
2872
/*
 * Read-modify-write the primary xireg CSR (mireg/sireg/vsireg), routing
 * the access to the AIA or generic csrind handler based on the value
 * currently held in the matching xiselect register.
 */
static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
                                target_ulong *val, target_ulong new_val,
                                target_ulong wr_mask)
{
    int ret = -EINVAL;
    target_ulong isel;

    /* Access is gated by the mstateen0 SVSLCT bit. */
    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* Translate CSR number for VS-mode */
    csrno = csrind_xlate_vs_csrno(env, csrno);

    /* Decode register details from CSR number */
    switch (csrno) {
    case CSR_MIREG:
        isel = env->miselect;
        break;
    case CSR_SIREG:
        isel = env->siselect;
        break;
    case CSR_VSIREG:
        isel = env->vsiselect;
        break;
    default:
        goto done;
    };

    /*
     * Use the xiselect range to determine actual op on xireg.
     *
     * Since we only checked the existence of AIA or Indirect Access in the
     * predicate, we should check the existence of the exact extension when
     * we get to a specific range and return illegal instruction exception even
     * in VS-mode.
     */
    if (xiselect_aia_range(isel)) {
        return rmw_xireg_aia(env, csrno, isel, val, new_val, wr_mask);
    } else if (riscv_cpu_cfg(env)->ext_smcsrind ||
               riscv_cpu_cfg(env)->ext_sscsrind) {
        return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
    }

done:
    return RISCV_EXCP_ILLEGAL_INST;
}
2921
/*
 * Read-modify-write the top external interrupt CSR (mtopei/stopei/
 * vstopei) by forwarding to the machine's IMSIC emulation callback.
 */
static RISCVException rmw_xtopei(CPURISCVState *env, int csrno,
                                 target_ulong *val, target_ulong new_val,
                                 target_ulong wr_mask)
{
    bool virt;
    int ret = -EINVAL;
    target_ulong priv, vgein;

    /* Translate CSR number for VS-mode */
    csrno = aia_xlate_vs_csrno(env, csrno);

    /* Decode register details from CSR number */
    virt = false;
    switch (csrno) {
    case CSR_MTOPEI:
        priv = PRV_M;
        break;
    case CSR_STOPEI:
        /* S-mode access is blocked while SEIP is injected through mvien. */
        if (env->mvien & MIP_SEIP && env->priv == PRV_S) {
            goto done;
        }
        priv = PRV_S;
        break;
    case CSR_VSTOPEI:
        priv = PRV_S;
        virt = true;
        break;
    default:
        goto done;
    };

    /* IMSIC CSRs only available when machine implements IMSIC. */
    if (!env->aia_ireg_rmw_fn[priv]) {
        goto done;
    }

    /* Find the selected guest interrupt file */
    vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;

    /* Selected guest interrupt file should be valid */
    if (virt && (!vgein || env->geilen < vgein)) {
        goto done;
    }

    /* Call machine specific IMSIC register emulation for TOPEI */
    ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
                    AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein,
                                  riscv_cpu_mxl_bits(env)),
                    val, new_val, wr_mask);

done:
    /* Any failure above surfaces as an illegal/virtual instruction trap. */
    if (ret) {
        return (env->virt_enabled && virt) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }
    return RISCV_EXCP_NONE;
}
2979
read_mtvec(CPURISCVState * env,int csrno,target_ulong * val)2980 static RISCVException read_mtvec(CPURISCVState *env, int csrno,
2981 target_ulong *val)
2982 {
2983 *val = env->mtvec;
2984 return RISCV_EXCP_NONE;
2985 }
2986
write_mtvec(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)2987 static RISCVException write_mtvec(CPURISCVState *env, int csrno,
2988 target_ulong val, uintptr_t ra)
2989 {
2990 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
2991 if ((val & 3) < 2) {
2992 env->mtvec = val;
2993 } else {
2994 qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n");
2995 }
2996 return RISCV_EXCP_NONE;
2997 }
2998
read_mcountinhibit(CPURISCVState * env,int csrno,target_ulong * val)2999 static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
3000 target_ulong *val)
3001 {
3002 *val = env->mcountinhibit;
3003 return RISCV_EXCP_NONE;
3004 }
3005
/*
 * Write mcountinhibit and resynchronize the PMU state of every counter
 * whose inhibit bit toggled: a counter being re-enabled snapshots the
 * current tick source, one being inhibited folds accumulated ticks into
 * its stored value.
 */
static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
                                          target_ulong val, uintptr_t ra)
{
    int cidx;
    PMUCTRState *counter;
    RISCVCPU *cpu = env_archcpu(env);
    uint32_t present_ctrs = cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR;
    target_ulong updated_ctrs = (env->mcountinhibit ^ val) & present_ctrs;
    uint64_t mhpmctr_val, prev_count, curr_count;

    /* WARL register - disable unavailable counters; TM bit is always 0 */
    env->mcountinhibit = val & present_ctrs;

    /* Check if any other counter is also monitoring cycles/instructions */
    for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
        /* Only counters whose inhibit bit actually toggled need work. */
        if (!(updated_ctrs & BIT(cidx)) ||
            (!riscv_pmu_ctr_monitor_cycles(env, cidx) &&
             !riscv_pmu_ctr_monitor_instructions(env, cidx))) {
            continue;
        }

        counter = &env->pmu_ctrs[cidx];

        if (!get_field(env->mcountinhibit, BIT(cidx))) {
            /*
             * Counter re-enabled: snapshot the current fixed-counter
             * reading so subsequent deltas count from this point.
             */
            counter->mhpmcounter_prev =
                riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                counter->mhpmcounterh_prev =
                    riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);
            }

            /* Programmable counters (index > 2) may need an overflow timer. */
            if (cidx > 2) {
                mhpmctr_val = counter->mhpmcounter_val;
                if (riscv_cpu_mxl(env) == MXL_RV32) {
                    mhpmctr_val = mhpmctr_val |
                            ((uint64_t)counter->mhpmcounterh_val << 32);
                }
                riscv_pmu_setup_timer(env, mhpmctr_val, cidx);
            }
        } else {
            /*
             * Counter inhibited: fold the ticks accumulated since the
             * last snapshot into the stored counter value.
             */
            curr_count = riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);

            mhpmctr_val = counter->mhpmcounter_val;
            prev_count = counter->mhpmcounter_prev;
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                uint64_t tmp =
                    riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);

                curr_count = curr_count | (tmp << 32);
                mhpmctr_val = mhpmctr_val |
                        ((uint64_t)counter->mhpmcounterh_val << 32);
                prev_count = prev_count |
                        ((uint64_t)counter->mhpmcounterh_prev << 32);
            }

            /* Adjust the counter for later reads. */
            mhpmctr_val = curr_count - prev_count + mhpmctr_val;
            counter->mhpmcounter_val = mhpmctr_val;
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                counter->mhpmcounterh_val = mhpmctr_val >> 32;
            }
        }
    }

    return RISCV_EXCP_NONE;
}
3072
read_scountinhibit(CPURISCVState * env,int csrno,target_ulong * val)3073 static RISCVException read_scountinhibit(CPURISCVState *env, int csrno,
3074 target_ulong *val)
3075 {
3076 /* S-mode can only access the bits delegated by M-mode */
3077 *val = env->mcountinhibit & env->mcounteren;
3078 return RISCV_EXCP_NONE;
3079 }
3080
write_scountinhibit(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3081 static RISCVException write_scountinhibit(CPURISCVState *env, int csrno,
3082 target_ulong val, uintptr_t ra)
3083 {
3084 return write_mcountinhibit(env, csrno, val & env->mcounteren, ra);
3085 }
3086
read_mcounteren(CPURISCVState * env,int csrno,target_ulong * val)3087 static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
3088 target_ulong *val)
3089 {
3090 *val = env->mcounteren;
3091 return RISCV_EXCP_NONE;
3092 }
3093
write_mcounteren(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3094 static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
3095 target_ulong val, uintptr_t ra)
3096 {
3097 RISCVCPU *cpu = env_archcpu(env);
3098
3099 /* WARL register - disable unavailable counters */
3100 env->mcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
3101 COUNTEREN_IR);
3102 return RISCV_EXCP_NONE;
3103 }
3104
3105 /* Machine Trap Handling */
read_mscratch_i128(CPURISCVState * env,int csrno,Int128 * val)3106 static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
3107 Int128 *val)
3108 {
3109 *val = int128_make128(env->mscratch, env->mscratchh);
3110 return RISCV_EXCP_NONE;
3111 }
3112
write_mscratch_i128(CPURISCVState * env,int csrno,Int128 val)3113 static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
3114 Int128 val)
3115 {
3116 env->mscratch = int128_getlo(val);
3117 env->mscratchh = int128_gethi(val);
3118 return RISCV_EXCP_NONE;
3119 }
3120
read_mscratch(CPURISCVState * env,int csrno,target_ulong * val)3121 static RISCVException read_mscratch(CPURISCVState *env, int csrno,
3122 target_ulong *val)
3123 {
3124 *val = env->mscratch;
3125 return RISCV_EXCP_NONE;
3126 }
3127
write_mscratch(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3128 static RISCVException write_mscratch(CPURISCVState *env, int csrno,
3129 target_ulong val, uintptr_t ra)
3130 {
3131 env->mscratch = val;
3132 return RISCV_EXCP_NONE;
3133 }
3134
read_mepc(CPURISCVState * env,int csrno,target_ulong * val)3135 static RISCVException read_mepc(CPURISCVState *env, int csrno,
3136 target_ulong *val)
3137 {
3138 *val = env->mepc & get_xepc_mask(env);
3139 return RISCV_EXCP_NONE;
3140 }
3141
write_mepc(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3142 static RISCVException write_mepc(CPURISCVState *env, int csrno,
3143 target_ulong val, uintptr_t ra)
3144 {
3145 env->mepc = val & get_xepc_mask(env);
3146 return RISCV_EXCP_NONE;
3147 }
3148
read_mcause(CPURISCVState * env,int csrno,target_ulong * val)3149 static RISCVException read_mcause(CPURISCVState *env, int csrno,
3150 target_ulong *val)
3151 {
3152 *val = env->mcause;
3153 return RISCV_EXCP_NONE;
3154 }
3155
write_mcause(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3156 static RISCVException write_mcause(CPURISCVState *env, int csrno,
3157 target_ulong val, uintptr_t ra)
3158 {
3159 env->mcause = val;
3160 return RISCV_EXCP_NONE;
3161 }
3162
read_mtval(CPURISCVState * env,int csrno,target_ulong * val)3163 static RISCVException read_mtval(CPURISCVState *env, int csrno,
3164 target_ulong *val)
3165 {
3166 *val = env->mtval;
3167 return RISCV_EXCP_NONE;
3168 }
3169
write_mtval(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3170 static RISCVException write_mtval(CPURISCVState *env, int csrno,
3171 target_ulong val, uintptr_t ra)
3172 {
3173 env->mtval = val;
3174 return RISCV_EXCP_NONE;
3175 }
3176
3177 /* Execution environment configuration setup */
read_menvcfg(CPURISCVState * env,int csrno,target_ulong * val)3178 static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
3179 target_ulong *val)
3180 {
3181 *val = env->menvcfg;
3182 return RISCV_EXCP_NONE;
3183 }
3184
3185 static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
3186 target_ulong val, uintptr_t ra);
/*
 * Write menvcfg.
 *
 * The writable mask starts with the XLEN-independent bits and, on RV64,
 * grows with each enabled extension (Svpbmt, Sstc, Smcdeleg, Svadu,
 * Ssdbltrp, Zicfilp, Zicfiss, Smnpm).  Because menvcfg gates which
 * henvcfg bits are effective, henvcfg is re-written at the end so its
 * value is re-legalized against the new menvcfg.
 */
static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
    /* Bits writable regardless of XLEN. */
    uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE |
                    MENVCFG_CBZE | MENVCFG_CDE;
    bool stce_changed = false;

    if (riscv_cpu_mxl(env) == MXL_RV64) {
        /* Upper-half control bits become writable with their extension. */
        mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
                (cfg->ext_sstc ? MENVCFG_STCE : 0) |
                (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) |
                (cfg->ext_svadu ? MENVCFG_ADUE : 0) |
                (cfg->ext_ssdbltrp ? MENVCFG_DTE : 0);

        if (env_archcpu(env)->cfg.ext_zicfilp) {
            mask |= MENVCFG_LPE;
        }

        if (env_archcpu(env)->cfg.ext_zicfiss) {
            mask |= MENVCFG_SSE;
        }

        /* Update PMM field only if the value is valid according to Zjpm v1.0 */
        if (env_archcpu(env)->cfg.ext_smnpm &&
            get_field(val, MENVCFG_PMM) != PMM_FIELD_RESERVED) {
            mask |= MENVCFG_PMM;
        }

        /* Writing DTE = 0 also clears mstatus.SDT. */
        if ((val & MENVCFG_DTE) == 0) {
            env->mstatus &= ~MSTATUS_SDT;
        }

        /* Record an STCE flip so the timer code can be notified below. */
        if (cfg->ext_sstc &&
            ((env->menvcfg & MENVCFG_STCE) != (val & MENVCFG_STCE))) {
            stce_changed = true;
        }
    }
    env->menvcfg = (env->menvcfg & ~mask) | (val & mask);

    if (stce_changed) {
        riscv_timer_stce_changed(env, true, !!(val & MENVCFG_STCE));
    }

    /* Re-legalize henvcfg against the updated menvcfg. */
    return write_henvcfg(env, CSR_HENVCFG, env->henvcfg, ra);
}
3233
read_menvcfgh(CPURISCVState * env,int csrno,target_ulong * val)3234 static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
3235 target_ulong *val)
3236 {
3237 *val = env->menvcfg >> 32;
3238 return RISCV_EXCP_NONE;
3239 }
3240
3241 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
3242 target_ulong val, uintptr_t ra);
/*
 * Write menvcfgh (RV32 upper half of menvcfg).
 *
 * The written 32-bit value is shifted into position as 'valh' and only
 * the extension-gated upper-half bits are merged.  As with write_menvcfg,
 * henvcfg's upper half is re-written afterwards so it stays legal with
 * respect to the new menvcfg.
 */
static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
    /* Each upper-half bit is writable only if its extension is present. */
    uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
                    (cfg->ext_sstc ? MENVCFG_STCE : 0) |
                    (cfg->ext_svadu ? MENVCFG_ADUE : 0) |
                    (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) |
                    (cfg->ext_ssdbltrp ? MENVCFG_DTE : 0);
    uint64_t valh = (uint64_t)val << 32;
    bool stce_changed = false;

    /* Record an STCE flip so the timer code can be notified below. */
    if (cfg->ext_sstc &&
        ((env->menvcfg & MENVCFG_STCE) != (valh & MENVCFG_STCE))) {
        stce_changed = true;
    }

    /* Writing DTE = 0 also clears mstatus.SDT. */
    if ((valh & MENVCFG_DTE) == 0) {
        env->mstatus &= ~MSTATUS_SDT;
    }

    env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);

    if (stce_changed) {
        riscv_timer_stce_changed(env, true, !!(valh & MENVCFG_STCE));
    }

    /* Re-legalize henvcfg's upper half against the updated menvcfg. */
    return write_henvcfgh(env, CSR_HENVCFGH, env->henvcfg >> 32, ra);
}
3272
read_senvcfg(CPURISCVState * env,int csrno,target_ulong * val)3273 static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
3274 target_ulong *val)
3275 {
3276 RISCVException ret;
3277
3278 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
3279 if (ret != RISCV_EXCP_NONE) {
3280 return ret;
3281 }
3282
3283 *val = env->senvcfg;
3284 return RISCV_EXCP_NONE;
3285 }
3286
write_senvcfg(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3287 static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
3288 target_ulong val, uintptr_t ra)
3289 {
3290 uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
3291 RISCVException ret;
3292 /* Update PMM field only if the value is valid according to Zjpm v1.0 */
3293 if (env_archcpu(env)->cfg.ext_ssnpm &&
3294 riscv_cpu_mxl(env) == MXL_RV64 &&
3295 get_field(val, SENVCFG_PMM) != PMM_FIELD_RESERVED) {
3296 mask |= SENVCFG_PMM;
3297 }
3298
3299 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
3300 if (ret != RISCV_EXCP_NONE) {
3301 return ret;
3302 }
3303
3304 if (env_archcpu(env)->cfg.ext_zicfilp) {
3305 mask |= SENVCFG_LPE;
3306 }
3307
3308 /* Higher mode SSE must be ON for next-less mode SSE to be ON */
3309 if (env_archcpu(env)->cfg.ext_zicfiss &&
3310 get_field(env->menvcfg, MENVCFG_SSE) &&
3311 (env->virt_enabled ? get_field(env->henvcfg, HENVCFG_SSE) : true)) {
3312 mask |= SENVCFG_SSE;
3313 }
3314
3315 if (env_archcpu(env)->cfg.ext_svukte) {
3316 mask |= SENVCFG_UKTE;
3317 }
3318
3319 env->senvcfg = (env->senvcfg & ~mask) | (val & mask);
3320 return RISCV_EXCP_NONE;
3321 }
3322
read_henvcfg(CPURISCVState * env,int csrno,target_ulong * val)3323 static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
3324 target_ulong *val)
3325 {
3326 RISCVException ret;
3327
3328 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
3329 if (ret != RISCV_EXCP_NONE) {
3330 return ret;
3331 }
3332
3333 /*
3334 * henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0
3335 * henvcfg.stce is read_only 0 when menvcfg.stce = 0
3336 * henvcfg.adue is read_only 0 when menvcfg.adue = 0
3337 * henvcfg.dte is read_only 0 when menvcfg.dte = 0
3338 */
3339 *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
3340 HENVCFG_DTE) | env->menvcfg);
3341 return RISCV_EXCP_NONE;
3342 }
3343
/*
 * Write henvcfg.
 *
 * PBMTE/STCE/ADUE/DTE are writable only where the corresponding menvcfg
 * bit is set; the extension-gated bits (Zicfilp LPE, Zicfiss SSE, Ssnpm
 * PMM) are handled on RV64.  Clearing DTE also clears vsstatus.SDT, and
 * any STCE flip is propagated to the timer code.
 */
static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
    /* Bits writable regardless of XLEN. */
    uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;
    RISCVException ret;
    bool stce_changed = false;

    /* Access may be blocked by the Smstateen HSENVCFG control bit. */
    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (riscv_cpu_mxl(env) == MXL_RV64) {
        /* These bits are writable only when menvcfg exposes them. */
        mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
                                HENVCFG_DTE);

        if (env_archcpu(env)->cfg.ext_zicfilp) {
            mask |= HENVCFG_LPE;
        }

        /* H can light up SSE for VS only if HS had it from menvcfg */
        if (env_archcpu(env)->cfg.ext_zicfiss &&
            get_field(env->menvcfg, MENVCFG_SSE)) {
            mask |= HENVCFG_SSE;
        }

        /* Update PMM field only if the value is valid according to Zjpm v1.0 */
        if (env_archcpu(env)->cfg.ext_ssnpm &&
            get_field(val, HENVCFG_PMM) != PMM_FIELD_RESERVED) {
            mask |= HENVCFG_PMM;
        }

        /* Record an STCE flip so the timer code can be notified below. */
        if (cfg->ext_sstc &&
            ((env->henvcfg & HENVCFG_STCE) != (val & HENVCFG_STCE))) {
            stce_changed = true;
        }
    }

    env->henvcfg = val & mask;
    /* Writing DTE = 0 also clears vsstatus.SDT. */
    if ((env->henvcfg & HENVCFG_DTE) == 0) {
        env->vsstatus &= ~MSTATUS_SDT;
    }

    if (stce_changed) {
        riscv_timer_stce_changed(env, false, !!(val & HENVCFG_STCE));
    }

    return RISCV_EXCP_NONE;
}
3394
read_henvcfgh(CPURISCVState * env,int csrno,target_ulong * val)3395 static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
3396 target_ulong *val)
3397 {
3398 RISCVException ret;
3399
3400 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
3401 if (ret != RISCV_EXCP_NONE) {
3402 return ret;
3403 }
3404
3405 *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
3406 HENVCFG_DTE) | env->menvcfg)) >> 32;
3407 return RISCV_EXCP_NONE;
3408 }
3409
write_henvcfgh(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3410 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
3411 target_ulong val, uintptr_t ra)
3412 {
3413 const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
3414 uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE |
3415 HENVCFG_ADUE | HENVCFG_DTE);
3416 uint64_t valh = (uint64_t)val << 32;
3417 RISCVException ret;
3418 bool stce_changed = false;
3419
3420 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
3421 if (ret != RISCV_EXCP_NONE) {
3422 return ret;
3423 }
3424
3425 if (cfg->ext_sstc &&
3426 ((env->henvcfg & HENVCFG_STCE) != (valh & HENVCFG_STCE))) {
3427 stce_changed = true;
3428 }
3429
3430 env->henvcfg = (env->henvcfg & 0xFFFFFFFF) | (valh & mask);
3431 if ((env->henvcfg & HENVCFG_DTE) == 0) {
3432 env->vsstatus &= ~MSTATUS_SDT;
3433 }
3434
3435 if (stce_changed) {
3436 riscv_timer_stce_changed(env, false, !!(val & HENVCFG_STCE));
3437 }
3438
3439 return RISCV_EXCP_NONE;
3440 }
3441
read_mstateen(CPURISCVState * env,int csrno,target_ulong * val)3442 static RISCVException read_mstateen(CPURISCVState *env, int csrno,
3443 target_ulong *val)
3444 {
3445 *val = env->mstateen[csrno - CSR_MSTATEEN0];
3446
3447 return RISCV_EXCP_NONE;
3448 }
3449
write_mstateen(CPURISCVState * env,int csrno,uint64_t wr_mask,target_ulong new_val)3450 static RISCVException write_mstateen(CPURISCVState *env, int csrno,
3451 uint64_t wr_mask, target_ulong new_val)
3452 {
3453 uint64_t *reg;
3454
3455 reg = &env->mstateen[csrno - CSR_MSTATEEN0];
3456 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
3457
3458 return RISCV_EXCP_NONE;
3459 }
3460
write_mstateen0(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3461 static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
3462 target_ulong new_val, uintptr_t ra)
3463 {
3464 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3465 if (!riscv_has_ext(env, RVF)) {
3466 wr_mask |= SMSTATEEN0_FCSR;
3467 }
3468
3469 if (env->priv_ver >= PRIV_VERSION_1_13_0) {
3470 wr_mask |= SMSTATEEN0_P1P13;
3471 }
3472
3473 if (riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_smcsrind) {
3474 wr_mask |= SMSTATEEN0_SVSLCT;
3475 }
3476
3477 /*
3478 * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is
3479 * implemented. However, that information is with MachineState and we can't
3480 * figure that out in csr.c. Just enable if Smaia is available.
3481 */
3482 if (riscv_cpu_cfg(env)->ext_smaia) {
3483 wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
3484 }
3485
3486 if (riscv_cpu_cfg(env)->ext_ssctr) {
3487 wr_mask |= SMSTATEEN0_CTR;
3488 }
3489
3490 return write_mstateen(env, csrno, wr_mask, new_val);
3491 }
3492
write_mstateen_1_3(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3493 static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno,
3494 target_ulong new_val, uintptr_t ra)
3495 {
3496 return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
3497 }
3498
read_mstateenh(CPURISCVState * env,int csrno,target_ulong * val)3499 static RISCVException read_mstateenh(CPURISCVState *env, int csrno,
3500 target_ulong *val)
3501 {
3502 *val = env->mstateen[csrno - CSR_MSTATEEN0H] >> 32;
3503
3504 return RISCV_EXCP_NONE;
3505 }
3506
write_mstateenh(CPURISCVState * env,int csrno,uint64_t wr_mask,target_ulong new_val)3507 static RISCVException write_mstateenh(CPURISCVState *env, int csrno,
3508 uint64_t wr_mask, target_ulong new_val)
3509 {
3510 uint64_t *reg, val;
3511
3512 reg = &env->mstateen[csrno - CSR_MSTATEEN0H];
3513 val = (uint64_t)new_val << 32;
3514 val |= *reg & 0xFFFFFFFF;
3515 *reg = (*reg & ~wr_mask) | (val & wr_mask);
3516
3517 return RISCV_EXCP_NONE;
3518 }
3519
write_mstateen0h(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3520 static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
3521 target_ulong new_val, uintptr_t ra)
3522 {
3523 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3524
3525 if (env->priv_ver >= PRIV_VERSION_1_13_0) {
3526 wr_mask |= SMSTATEEN0_P1P13;
3527 }
3528
3529 if (riscv_cpu_cfg(env)->ext_ssctr) {
3530 wr_mask |= SMSTATEEN0_CTR;
3531 }
3532
3533 return write_mstateenh(env, csrno, wr_mask, new_val);
3534 }
3535
write_mstateenh_1_3(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3536 static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno,
3537 target_ulong new_val, uintptr_t ra)
3538 {
3539 return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
3540 }
3541
read_hstateen(CPURISCVState * env,int csrno,target_ulong * val)3542 static RISCVException read_hstateen(CPURISCVState *env, int csrno,
3543 target_ulong *val)
3544 {
3545 int index = csrno - CSR_HSTATEEN0;
3546
3547 *val = env->hstateen[index] & env->mstateen[index];
3548
3549 return RISCV_EXCP_NONE;
3550 }
3551
write_hstateen(CPURISCVState * env,int csrno,uint64_t mask,target_ulong new_val)3552 static RISCVException write_hstateen(CPURISCVState *env, int csrno,
3553 uint64_t mask, target_ulong new_val)
3554 {
3555 int index = csrno - CSR_HSTATEEN0;
3556 uint64_t *reg, wr_mask;
3557
3558 reg = &env->hstateen[index];
3559 wr_mask = env->mstateen[index] & mask;
3560 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
3561
3562 return RISCV_EXCP_NONE;
3563 }
3564
write_hstateen0(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3565 static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
3566 target_ulong new_val, uintptr_t ra)
3567 {
3568 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3569
3570 if (!riscv_has_ext(env, RVF)) {
3571 wr_mask |= SMSTATEEN0_FCSR;
3572 }
3573
3574 if (riscv_cpu_cfg(env)->ext_ssaia || riscv_cpu_cfg(env)->ext_sscsrind) {
3575 wr_mask |= SMSTATEEN0_SVSLCT;
3576 }
3577
3578 /*
3579 * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is
3580 * implemented. However, that information is with MachineState and we can't
3581 * figure that out in csr.c. Just enable if Ssaia is available.
3582 */
3583 if (riscv_cpu_cfg(env)->ext_ssaia) {
3584 wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
3585 }
3586
3587 if (riscv_cpu_cfg(env)->ext_ssctr) {
3588 wr_mask |= SMSTATEEN0_CTR;
3589 }
3590
3591 return write_hstateen(env, csrno, wr_mask, new_val);
3592 }
3593
write_hstateen_1_3(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3594 static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno,
3595 target_ulong new_val, uintptr_t ra)
3596 {
3597 return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
3598 }
3599
read_hstateenh(CPURISCVState * env,int csrno,target_ulong * val)3600 static RISCVException read_hstateenh(CPURISCVState *env, int csrno,
3601 target_ulong *val)
3602 {
3603 int index = csrno - CSR_HSTATEEN0H;
3604
3605 *val = (env->hstateen[index] >> 32) & (env->mstateen[index] >> 32);
3606
3607 return RISCV_EXCP_NONE;
3608 }
3609
write_hstateenh(CPURISCVState * env,int csrno,uint64_t mask,target_ulong new_val)3610 static RISCVException write_hstateenh(CPURISCVState *env, int csrno,
3611 uint64_t mask, target_ulong new_val)
3612 {
3613 int index = csrno - CSR_HSTATEEN0H;
3614 uint64_t *reg, wr_mask, val;
3615
3616 reg = &env->hstateen[index];
3617 val = (uint64_t)new_val << 32;
3618 val |= *reg & 0xFFFFFFFF;
3619 wr_mask = env->mstateen[index] & mask;
3620 *reg = (*reg & ~wr_mask) | (val & wr_mask);
3621
3622 return RISCV_EXCP_NONE;
3623 }
3624
write_hstateen0h(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3625 static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
3626 target_ulong new_val, uintptr_t ra)
3627 {
3628 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3629
3630 if (riscv_cpu_cfg(env)->ext_ssctr) {
3631 wr_mask |= SMSTATEEN0_CTR;
3632 }
3633
3634 return write_hstateenh(env, csrno, wr_mask, new_val);
3635 }
3636
write_hstateenh_1_3(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3637 static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno,
3638 target_ulong new_val, uintptr_t ra)
3639 {
3640 return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
3641 }
3642
read_sstateen(CPURISCVState * env,int csrno,target_ulong * val)3643 static RISCVException read_sstateen(CPURISCVState *env, int csrno,
3644 target_ulong *val)
3645 {
3646 bool virt = env->virt_enabled;
3647 int index = csrno - CSR_SSTATEEN0;
3648
3649 *val = env->sstateen[index] & env->mstateen[index];
3650 if (virt) {
3651 *val &= env->hstateen[index];
3652 }
3653
3654 return RISCV_EXCP_NONE;
3655 }
3656
write_sstateen(CPURISCVState * env,int csrno,uint64_t mask,target_ulong new_val)3657 static RISCVException write_sstateen(CPURISCVState *env, int csrno,
3658 uint64_t mask, target_ulong new_val)
3659 {
3660 bool virt = env->virt_enabled;
3661 int index = csrno - CSR_SSTATEEN0;
3662 uint64_t wr_mask;
3663 uint64_t *reg;
3664
3665 wr_mask = env->mstateen[index] & mask;
3666 if (virt) {
3667 wr_mask &= env->hstateen[index];
3668 }
3669
3670 reg = &env->sstateen[index];
3671 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
3672
3673 return RISCV_EXCP_NONE;
3674 }
3675
write_sstateen0(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3676 static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
3677 target_ulong new_val, uintptr_t ra)
3678 {
3679 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3680
3681 if (!riscv_has_ext(env, RVF)) {
3682 wr_mask |= SMSTATEEN0_FCSR;
3683 }
3684
3685 return write_sstateen(env, csrno, wr_mask, new_val);
3686 }
3687
write_sstateen_1_3(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3688 static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno,
3689 target_ulong new_val, uintptr_t ra)
3690 {
3691 return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
3692 }
3693
/*
 * Read-modify-write the 64-bit view of mip.
 *
 * Only interrupts in delegable_ints are writable here.  SEIP writes are
 * tracked separately from the externally driven SEIP source, and with
 * Sstc enabled STIP (and, with henvcfg.STCE, VSTIP) become read-only.
 * For non-hvip accesses the returned value also folds in the
 * hgeip-driven VSEIP and the vstime-driven VSTIP sources.
 */
static RISCVException rmw_mip64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    uint64_t old_mip, mask = wr_mask & delegable_ints;
    uint32_t gin;

    if (mask & MIP_SEIP) {
        /* Remember the software-written SEIP and OR in the external one. */
        env->software_seip = new_val & MIP_SEIP;
        new_val |= env->external_seip * MIP_SEIP;
    }

    if (riscv_cpu_cfg(env)->ext_sstc && (env->priv == PRV_M) &&
        get_field(env->menvcfg, MENVCFG_STCE)) {
        /* sstc extension forbids STIP & VSTIP to be writeable in mip */

        /* STIP is not writable when menvcfg.STCE is enabled. */
        mask = mask & ~MIP_STIP;

        /* VSTIP is not writable when both [mh]envcfg.STCE are enabled. */
        if (get_field(env->henvcfg, HENVCFG_STCE)) {
            mask = mask & ~MIP_VSTIP;
        }
    }

    if (mask) {
        old_mip = riscv_cpu_update_mip(env, mask, (new_val & mask));
    } else {
        old_mip = env->mip;
    }

    if (csrno != CSR_HVIP) {
        /* Fold in VSEIP from hgeip (selected by VGEIN) and timer VSTIP. */
        gin = get_field(env->hstatus, HSTATUS_VGEIN);
        old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0;
        old_mip |= env->vstime_irq ? MIP_VSTIP : 0;
    }

    if (ret_val) {
        *ret_val = old_mip;
    }

    return RISCV_EXCP_NONE;
}
3737
rmw_mip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3738 static RISCVException rmw_mip(CPURISCVState *env, int csrno,
3739 target_ulong *ret_val,
3740 target_ulong new_val, target_ulong wr_mask)
3741 {
3742 uint64_t rval;
3743 RISCVException ret;
3744
3745 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask);
3746 if (ret_val) {
3747 *ret_val = rval;
3748 }
3749
3750 return ret;
3751 }
3752
rmw_miph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3753 static RISCVException rmw_miph(CPURISCVState *env, int csrno,
3754 target_ulong *ret_val,
3755 target_ulong new_val, target_ulong wr_mask)
3756 {
3757 uint64_t rval;
3758 RISCVException ret;
3759
3760 ret = rmw_mip64(env, csrno, &rval,
3761 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3762 if (ret_val) {
3763 *ret_val = rval >> 32;
3764 }
3765
3766 return ret;
3767 }
3768
3769 /*
3770 * The function is written for two use-cases:
3771 * 1- To access mvip csr as is for m-mode access.
3772 * 2- To access sip as a combination of mip and mvip for s-mode.
3773 *
3774 * Both report bits 1, 5, 9 and 13:63 but with the exception of
3775 * STIP being read-only zero in case of mvip when sstc extension
3776 * is present.
3777 * Also, sip needs to be read-only zero when both mideleg[i] and
3778 * mvien[i] are zero but mvip needs to be an alias of mip.
3779 */
/*
 * Read-modify-write the 64-bit view of mvip (or sip via mvip aliasing).
 *
 * Bits in alias_mask are forwarded to mip via rmw_mip(); bits in
 * nalias_mask are stored in the separate env->mvip and, when written,
 * require an explicit interrupt re-evaluation since they bypass
 * riscv_cpu_update_mip().
 */
static RISCVException rmw_mvip64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    RISCVCPU *cpu = env_archcpu(env);
    target_ulong ret_mip = 0;
    RISCVException ret;
    uint64_t old_mvip;

    /*
     * mideleg[i]  mvien[i]
     *   0           0      No delegation. mvip[i] is alias of mip[i].
     *   0           1      mvip[i] becomes source of interrupt, mip bypassed.
     *   1           X      mip[i] is source of interrupt and mvip[i] aliases
     *                      mip[i].
     *
     *   So alias condition would be for bits:
     *      ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (mideleg | ~mvien)) |
     *          (!sstc & MIP_STIP)
     *
     *   Non-alias condition will be for bits:
     *      (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (~mideleg & mvien)
     *
     *  alias_mask denotes the bits that come from mip nalias_mask denotes bits
     *  that come from hvip.
     */
    uint64_t alias_mask = ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
        (env->mideleg | ~env->mvien)) | MIP_STIP;
    uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
        (~env->mideleg & env->mvien);
    uint64_t wr_mask_mvip;
    uint64_t wr_mask_mip;

    /*
     * mideleg[i]  mvien[i]
     *   0           0      sip[i] read-only zero.
     *   0           1      sip[i] alias of mvip[i].
     *   1           X      sip[i] alias of mip[i].
     *
     *  Both alias and non-alias mask remain same for sip except for bits
     *  which are zero in both mideleg and mvien.
     */
    if (csrno == CSR_SIP) {
        /* Remove bits that are zero in both mideleg and mvien. */
        alias_mask &= (env->mideleg | env->mvien);
        nalias_mask &= (env->mideleg | env->mvien);
    }

    /*
     * If sstc is present, mvip.STIP is not an alias of mip.STIP so clear
     * that bit in the value returned from mip.
     */
    if (cpu->cfg.ext_sstc && (env->priv == PRV_M) &&
        get_field(env->menvcfg, MENVCFG_STCE)) {
        alias_mask &= ~MIP_STIP;
    }

    wr_mask_mip = wr_mask & alias_mask & mvip_writable_mask;
    wr_mask_mvip = wr_mask & nalias_mask & mvip_writable_mask;

    /*
     * For bits set in alias_mask, mvip needs to be alias of mip, so forward
     * this to rmw_mip.
     */
    ret = rmw_mip(env, CSR_MIP, &ret_mip, new_val, wr_mask_mip);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    old_mvip = env->mvip;

    /*
     * Write to mvip. Update only non-alias bits. Alias bits were updated
     * in mip in rmw_mip above.
     */
    if (wr_mask_mvip) {
        env->mvip = (env->mvip & ~wr_mask_mvip) | (new_val & wr_mask_mvip);

        /*
         * Given mvip is separate source from mip, we need to trigger interrupt
         * from here separately. Normally this happen from riscv_cpu_update_mip.
         */
        riscv_cpu_interrupt(env);
    }

    if (ret_val) {
        /* Combine aliased mip bits with the locally stored mvip bits. */
        ret_mip &= alias_mask;
        old_mvip &= nalias_mask;

        *ret_val = old_mvip | ret_mip;
    }

    return RISCV_EXCP_NONE;
}
3874
rmw_mvip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3875 static RISCVException rmw_mvip(CPURISCVState *env, int csrno,
3876 target_ulong *ret_val,
3877 target_ulong new_val, target_ulong wr_mask)
3878 {
3879 uint64_t rval;
3880 RISCVException ret;
3881
3882 ret = rmw_mvip64(env, csrno, &rval, new_val, wr_mask);
3883 if (ret_val) {
3884 *ret_val = rval;
3885 }
3886
3887 return ret;
3888 }
3889
rmw_mviph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3890 static RISCVException rmw_mviph(CPURISCVState *env, int csrno,
3891 target_ulong *ret_val,
3892 target_ulong new_val, target_ulong wr_mask)
3893 {
3894 uint64_t rval;
3895 RISCVException ret;
3896
3897 ret = rmw_mvip64(env, csrno, &rval,
3898 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3899 if (ret_val) {
3900 *ret_val = rval >> 32;
3901 }
3902
3903 return ret;
3904 }
3905
3906 /* Supervisor Trap Setup */
read_sstatus_i128(CPURISCVState * env,int csrno,Int128 * val)3907 static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
3908 Int128 *val)
3909 {
3910 uint64_t mask = sstatus_v1_10_mask;
3911 uint64_t sstatus = env->mstatus & mask;
3912 if (env->xl != MXL_RV32 || env->debugger) {
3913 mask |= SSTATUS64_UXL;
3914 }
3915 if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
3916 mask |= SSTATUS_SDT;
3917 }
3918
3919 if (env_archcpu(env)->cfg.ext_zicfilp) {
3920 mask |= SSTATUS_SPELP;
3921 }
3922
3923 *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
3924 return RISCV_EXCP_NONE;
3925 }
3926
read_sstatus(CPURISCVState * env,int csrno,target_ulong * val)3927 static RISCVException read_sstatus(CPURISCVState *env, int csrno,
3928 target_ulong *val)
3929 {
3930 target_ulong mask = (sstatus_v1_10_mask);
3931 if (env->xl != MXL_RV32 || env->debugger) {
3932 mask |= SSTATUS64_UXL;
3933 }
3934
3935 if (env_archcpu(env)->cfg.ext_zicfilp) {
3936 mask |= SSTATUS_SPELP;
3937 }
3938 if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
3939 mask |= SSTATUS_SDT;
3940 }
3941 /* TODO: Use SXL not MXL. */
3942 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
3943 return RISCV_EXCP_NONE;
3944 }
3945
write_sstatus(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3946 static RISCVException write_sstatus(CPURISCVState *env, int csrno,
3947 target_ulong val, uintptr_t ra)
3948 {
3949 target_ulong mask = (sstatus_v1_10_mask);
3950
3951 if (env->xl != MXL_RV32 || env->debugger) {
3952 if ((val & SSTATUS64_UXL) != 0) {
3953 mask |= SSTATUS64_UXL;
3954 }
3955 }
3956
3957 if (env_archcpu(env)->cfg.ext_zicfilp) {
3958 mask |= SSTATUS_SPELP;
3959 }
3960 if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
3961 mask |= SSTATUS_SDT;
3962 }
3963 target_ulong newval = (env->mstatus & ~mask) | (val & mask);
3964 return write_mstatus(env, CSR_MSTATUS, newval, ra);
3965 }
3966
/*
 * Read-modify-write the 64-bit view of vsie.
 *
 * In the guest view the VS-level bits sit one position below their
 * mie/hideleg positions, so both new_val and wr_mask are shifted up
 * before use and the result is shifted back down.  Bits delegated via
 * hideleg alias mie; hvien-only local-interrupt bits are kept in the
 * separate env->vsie storage.
 */
static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    uint64_t alias_mask = (LOCAL_INTERRUPTS | VS_MODE_INTERRUPTS) &
                            env->hideleg;
    uint64_t nalias_mask = LOCAL_INTERRUPTS & (~env->hideleg & env->hvien);
    uint64_t rval, rval_vs, vsbits;
    uint64_t wr_mask_vsie;
    uint64_t wr_mask_mie;
    RISCVException ret;

    /* Bring VS-level bits to correct position */
    vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
    new_val &= ~(VS_MODE_INTERRUPTS >> 1);
    new_val |= vsbits << 1;

    vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
    wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
    wr_mask |= vsbits << 1;

    /* Split the write between the mie alias and the local vsie storage. */
    wr_mask_mie = wr_mask & alias_mask;
    wr_mask_vsie = wr_mask & nalias_mask;

    ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask_mie);

    rval_vs = env->vsie & nalias_mask;
    env->vsie = (env->vsie & ~wr_mask_vsie) | (new_val & wr_mask_vsie);

    if (ret_val) {
        /* Shift VS-level bits back down and merge in the local bits. */
        rval &= alias_mask;
        vsbits = rval & VS_MODE_INTERRUPTS;
        rval &= ~VS_MODE_INTERRUPTS;
        *ret_val = rval | (vsbits >> 1) | rval_vs;
    }

    return ret;
}
4005
rmw_vsie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4006 static RISCVException rmw_vsie(CPURISCVState *env, int csrno,
4007 target_ulong *ret_val,
4008 target_ulong new_val, target_ulong wr_mask)
4009 {
4010 uint64_t rval;
4011 RISCVException ret;
4012
4013 ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask);
4014 if (ret_val) {
4015 *ret_val = rval;
4016 }
4017
4018 return ret;
4019 }
4020
rmw_vsieh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4021 static RISCVException rmw_vsieh(CPURISCVState *env, int csrno,
4022 target_ulong *ret_val,
4023 target_ulong new_val, target_ulong wr_mask)
4024 {
4025 uint64_t rval;
4026 RISCVException ret;
4027
4028 ret = rmw_vsie64(env, csrno, &rval,
4029 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4030 if (ret_val) {
4031 *ret_val = rval >> 32;
4032 }
4033
4034 return ret;
4035 }
4036
/*
 * Read-modify-write the 64-bit view of sie.
 *
 * Delegated bits (mideleg) alias mie; bits that are only virtually
 * enabled (mvien set, mideleg clear) are kept in the separate env->sie
 * backing store.  In VS-mode the access is redirected to vsie, unless
 * hvictl.VTI forces a virtual-instruction fault.
 */
static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
        (~env->mideleg & env->mvien);
    uint64_t alias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & env->mideleg;
    uint64_t sie_mask = wr_mask & nalias_mask;
    RISCVException ret;

    /*
     * mideleg[i]  mvien[i]
     *   0           0      sie[i] read-only zero.
     *   0           1      sie[i] is a separate writable bit.
     *   1           X      sie[i] alias of mie[i].
     *
     * Both alias and non-alias mask remain same for sip except for bits
     * which are zero in both mideleg and mvien.
     */
    if (env->virt_enabled) {
        if (env->hvictl & HVICTL_VTI) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
        if (ret_val) {
            *ret_val &= alias_mask;
        }
    } else {
        /* Aliased bits go through mie; the rest stay in env->sie. */
        ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & alias_mask);
        if (ret_val) {
            *ret_val &= alias_mask;
            *ret_val |= env->sie & nalias_mask;
        }

        env->sie = (env->sie & ~sie_mask) | (new_val & sie_mask);
    }

    return ret;
}
4076
rmw_sie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4077 static RISCVException rmw_sie(CPURISCVState *env, int csrno,
4078 target_ulong *ret_val,
4079 target_ulong new_val, target_ulong wr_mask)
4080 {
4081 uint64_t rval;
4082 RISCVException ret;
4083
4084 ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask);
4085 if (ret == RISCV_EXCP_NONE && ret_val) {
4086 *ret_val = rval;
4087 }
4088
4089 return ret;
4090 }
4091
rmw_sieh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4092 static RISCVException rmw_sieh(CPURISCVState *env, int csrno,
4093 target_ulong *ret_val,
4094 target_ulong new_val, target_ulong wr_mask)
4095 {
4096 uint64_t rval;
4097 RISCVException ret;
4098
4099 ret = rmw_sie64(env, csrno, &rval,
4100 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4101 if (ret_val) {
4102 *ret_val = rval >> 32;
4103 }
4104
4105 return ret;
4106 }
4107
read_stvec(CPURISCVState * env,int csrno,target_ulong * val)4108 static RISCVException read_stvec(CPURISCVState *env, int csrno,
4109 target_ulong *val)
4110 {
4111 *val = env->stvec;
4112 return RISCV_EXCP_NONE;
4113 }
4114
write_stvec(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4115 static RISCVException write_stvec(CPURISCVState *env, int csrno,
4116 target_ulong val, uintptr_t ra)
4117 {
4118 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
4119 if ((val & 3) < 2) {
4120 env->stvec = val;
4121 } else {
4122 qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n");
4123 }
4124 return RISCV_EXCP_NONE;
4125 }
4126
read_scounteren(CPURISCVState * env,int csrno,target_ulong * val)4127 static RISCVException read_scounteren(CPURISCVState *env, int csrno,
4128 target_ulong *val)
4129 {
4130 *val = env->scounteren;
4131 return RISCV_EXCP_NONE;
4132 }
4133
write_scounteren(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4134 static RISCVException write_scounteren(CPURISCVState *env, int csrno,
4135 target_ulong val, uintptr_t ra)
4136 {
4137 RISCVCPU *cpu = env_archcpu(env);
4138
4139 /* WARL register - disable unavailable counters */
4140 env->scounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
4141 COUNTEREN_IR);
4142 return RISCV_EXCP_NONE;
4143 }
4144
4145 /* Supervisor Trap Handling */
read_sscratch_i128(CPURISCVState * env,int csrno,Int128 * val)4146 static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
4147 Int128 *val)
4148 {
4149 *val = int128_make128(env->sscratch, env->sscratchh);
4150 return RISCV_EXCP_NONE;
4151 }
4152
write_sscratch_i128(CPURISCVState * env,int csrno,Int128 val)4153 static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
4154 Int128 val)
4155 {
4156 env->sscratch = int128_getlo(val);
4157 env->sscratchh = int128_gethi(val);
4158 return RISCV_EXCP_NONE;
4159 }
4160
read_sscratch(CPURISCVState * env,int csrno,target_ulong * val)4161 static RISCVException read_sscratch(CPURISCVState *env, int csrno,
4162 target_ulong *val)
4163 {
4164 *val = env->sscratch;
4165 return RISCV_EXCP_NONE;
4166 }
4167
write_sscratch(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4168 static RISCVException write_sscratch(CPURISCVState *env, int csrno,
4169 target_ulong val, uintptr_t ra)
4170 {
4171 env->sscratch = val;
4172 return RISCV_EXCP_NONE;
4173 }
4174
read_sepc(CPURISCVState * env,int csrno,target_ulong * val)4175 static RISCVException read_sepc(CPURISCVState *env, int csrno,
4176 target_ulong *val)
4177 {
4178 *val = env->sepc & get_xepc_mask(env);
4179 return RISCV_EXCP_NONE;
4180 }
4181
write_sepc(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4182 static RISCVException write_sepc(CPURISCVState *env, int csrno,
4183 target_ulong val, uintptr_t ra)
4184 {
4185 env->sepc = val & get_xepc_mask(env);
4186 return RISCV_EXCP_NONE;
4187 }
4188
read_scause(CPURISCVState * env,int csrno,target_ulong * val)4189 static RISCVException read_scause(CPURISCVState *env, int csrno,
4190 target_ulong *val)
4191 {
4192 *val = env->scause;
4193 return RISCV_EXCP_NONE;
4194 }
4195
write_scause(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4196 static RISCVException write_scause(CPURISCVState *env, int csrno,
4197 target_ulong val, uintptr_t ra)
4198 {
4199 env->scause = val;
4200 return RISCV_EXCP_NONE;
4201 }
4202
read_stval(CPURISCVState * env,int csrno,target_ulong * val)4203 static RISCVException read_stval(CPURISCVState *env, int csrno,
4204 target_ulong *val)
4205 {
4206 *val = env->stval;
4207 return RISCV_EXCP_NONE;
4208 }
4209
write_stval(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4210 static RISCVException write_stval(CPURISCVState *env, int csrno,
4211 target_ulong val, uintptr_t ra)
4212 {
4213 env->stval = val;
4214 return RISCV_EXCP_NONE;
4215 }
4216
4217 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
4218 uint64_t *ret_val,
4219 uint64_t new_val, uint64_t wr_mask);
4220
/*
 * Read-modify-write the 64-bit view of vsip.
 *
 * The writable VS bits are routed through hvip (rmw_hvip64), restricted
 * to interrupts delegated by hideleg or virtually enabled by hvien and
 * further limited by vsip_writable_mask.  As with vsie, the VS-level
 * bits appear one position lower in vsip than in mip/hideleg, hence the
 * shift up before the access and back down for the read value.
 */
static RISCVException rmw_vsip64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t rval, mask = env->hideleg & VS_MODE_INTERRUPTS;
    uint64_t vsbits;

    /* Add virtualized bits into vsip mask. */
    mask |= env->hvien & ~env->hideleg;

    /* Bring VS-level bits to correct position */
    vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
    new_val &= ~(VS_MODE_INTERRUPTS >> 1);
    new_val |= vsbits << 1;
    vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
    wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
    wr_mask |= vsbits << 1;

    ret = rmw_hvip64(env, csrno, &rval, new_val,
                     wr_mask & mask & vsip_writable_mask);
    if (ret_val) {
        rval &= mask;
        /* Shift the VS bits back down to their vsip positions. */
        vsbits = rval & VS_MODE_INTERRUPTS;
        rval &= ~VS_MODE_INTERRUPTS;
        *ret_val = rval | (vsbits >> 1);
    }

    return ret;
}
4251
rmw_vsip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4252 static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
4253 target_ulong *ret_val,
4254 target_ulong new_val, target_ulong wr_mask)
4255 {
4256 uint64_t rval;
4257 RISCVException ret;
4258
4259 ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask);
4260 if (ret_val) {
4261 *ret_val = rval;
4262 }
4263
4264 return ret;
4265 }
4266
rmw_vsiph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4267 static RISCVException rmw_vsiph(CPURISCVState *env, int csrno,
4268 target_ulong *ret_val,
4269 target_ulong new_val, target_ulong wr_mask)
4270 {
4271 uint64_t rval;
4272 RISCVException ret;
4273
4274 ret = rmw_vsip64(env, csrno, &rval,
4275 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4276 if (ret_val) {
4277 *ret_val = rval >> 32;
4278 }
4279
4280 return ret;
4281 }
4282
/*
 * Read-modify-write the 64-bit view of sip.
 *
 * Writable bits are those delegated (mideleg) or virtually enabled
 * (mvien), further restricted by sip_writable_mask; the backing state
 * lives in mip/mvip (via rmw_mvip64).  In VS-mode the access is
 * redirected to vsip, unless hvictl.VTI forces a fault.  The read-back
 * value is always masked to bits visible at S-level.
 */
static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t mask = (env->mideleg | env->mvien) & sip_writable_mask;

    if (env->virt_enabled) {
        if (env->hvictl & HVICTL_VTI) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
    } else {
        ret = rmw_mvip64(env, csrno, ret_val, new_val, wr_mask & mask);
    }

    if (ret_val) {
        /* Only expose bits that are delegated or virtually enabled. */
        *ret_val &= (env->mideleg | env->mvien) &
            (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS);
    }

    return ret;
}
4306
rmw_sip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4307 static RISCVException rmw_sip(CPURISCVState *env, int csrno,
4308 target_ulong *ret_val,
4309 target_ulong new_val, target_ulong wr_mask)
4310 {
4311 uint64_t rval;
4312 RISCVException ret;
4313
4314 ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask);
4315 if (ret_val) {
4316 *ret_val = rval;
4317 }
4318
4319 return ret;
4320 }
4321
rmw_siph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4322 static RISCVException rmw_siph(CPURISCVState *env, int csrno,
4323 target_ulong *ret_val,
4324 target_ulong new_val, target_ulong wr_mask)
4325 {
4326 uint64_t rval;
4327 RISCVException ret;
4328
4329 ret = rmw_sip64(env, csrno, &rval,
4330 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4331 if (ret_val) {
4332 *ret_val = rval >> 32;
4333 }
4334
4335 return ret;
4336 }
4337
4338 /* Supervisor Protection and Translation */
read_satp(CPURISCVState * env,int csrno,target_ulong * val)4339 static RISCVException read_satp(CPURISCVState *env, int csrno,
4340 target_ulong *val)
4341 {
4342 if (!riscv_cpu_cfg(env)->mmu) {
4343 *val = 0;
4344 return RISCV_EXCP_NONE;
4345 }
4346 *val = env->satp;
4347 return RISCV_EXCP_NONE;
4348 }
4349
write_satp(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4350 static RISCVException write_satp(CPURISCVState *env, int csrno,
4351 target_ulong val, uintptr_t ra)
4352 {
4353 if (!riscv_cpu_cfg(env)->mmu) {
4354 return RISCV_EXCP_NONE;
4355 }
4356
4357 env->satp = legalize_xatp(env, env->satp, val);
4358 return RISCV_EXCP_NONE;
4359 }
4360
/*
 * Read-modify-write sctrdepth (Control Transfer Records depth).
 *
 * Writes are clamped so the encoded depth never exceeds SCTRDEPTH_MAX,
 * and sctrstatus.WRPTR is re-legalized against the new buffer size.
 */
static RISCVException rmw_sctrdepth(CPURISCVState *env, int csrno,
                                    target_ulong *ret_val,
                                    target_ulong new_val, target_ulong wr_mask)
{
    uint64_t mask = wr_mask & SCTRDEPTH_MASK;

    if (ret_val) {
        *ret_val = env->sctrdepth;
    }

    env->sctrdepth = (env->sctrdepth & ~mask) | (new_val & mask);

    /* Correct depth. */
    if (mask) {
        uint64_t depth = get_field(env->sctrdepth, SCTRDEPTH_MASK);

        if (depth > SCTRDEPTH_MAX) {
            depth = SCTRDEPTH_MAX;
            env->sctrdepth = set_field(env->sctrdepth, SCTRDEPTH_MASK, depth);
        }

        /* Update sctrstatus.WRPTR with a legal value */
        /* Entry count is 16 << depth; WRPTR must stay below it. */
        depth = 16ULL << depth;
        env->sctrstatus =
            env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
    }

    return RISCV_EXCP_NONE;
}
4390
/*
 * Read-modify-write sctrstatus, keeping WRPTR within the buffer size
 * implied by the current sctrdepth setting.
 */
static RISCVException rmw_sctrstatus(CPURISCVState *env, int csrno,
                                     target_ulong *ret_val,
                                     target_ulong new_val, target_ulong wr_mask)
{
    /* Number of CTR entries is 16 << depth-field. */
    uint32_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
    uint32_t mask = wr_mask & SCTRSTATUS_MASK;

    if (ret_val) {
        *ret_val = env->sctrstatus;
    }

    env->sctrstatus = (env->sctrstatus & ~mask) | (new_val & mask);

    /* Update sctrstatus.WRPTR with a legal value */
    env->sctrstatus = env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));

    return RISCV_EXCP_NONE;
}
4409
/*
 * Shared read-modify-write handler for mctrctl/sctrctl/vsctrctl.
 *
 * Selects the backing field and legal-bit mask from the CSR number and
 * current virtualization state: sctrctl accessed with V=1 behaves as
 * vsctrctl.
 */
static RISCVException rmw_xctrctl(CPURISCVState *env, int csrno,
                                  target_ulong *ret_val,
                                  target_ulong new_val, target_ulong wr_mask)
{
    uint64_t csr_mask, mask = wr_mask;
    uint64_t *ctl_ptr = &env->mctrctl;

    if (csrno == CSR_MCTRCTL) {
        csr_mask = MCTRCTL_MASK;
    } else if (csrno == CSR_SCTRCTL && !env->virt_enabled) {
        csr_mask = SCTRCTL_MASK;
    } else {
        /*
         * This is for csrno == CSR_SCTRCTL and env->virt_enabled == true
         * or csrno == CSR_VSCTRCTL.
         */
        csr_mask = VSCTRCTL_MASK;
        ctl_ptr = &env->vsctrctl;
    }

    mask &= csr_mask;

    if (ret_val) {
        *ret_val = *ctl_ptr & csr_mask;
    }

    *ctl_ptr = (*ctl_ptr & ~mask) | (new_val & mask);

    return RISCV_EXCP_NONE;
}
4440
/*
 * Read vstopi (top VS-level interrupt, AIA).
 *
 * Collects interrupt candidates into siid[]/siprio[]: the VS external
 * interrupt (from the selected guest-interrupt file or hvictl), plus
 * either the hvictl-injected interrupt (when hvictl.VTI is set) or the
 * highest pending VS interrupt.  The lowest-priority-number candidate
 * wins; the result is packed as (IID << TOPI_IID_SHIFT) | IPRIO.
 */
static RISCVException read_vstopi(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    int irq, ret;
    target_ulong topei;
    uint64_t vseip, vsgein;
    uint32_t iid, iprio, hviid, hviprio, gein;
    uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS];

    gein = get_field(env->hstatus, HSTATUS_VGEIN);
    hviid = get_field(env->hvictl, HVICTL_IID);
    hviprio = get_field(env->hvictl, HVICTL_IPRIO);

    if (gein) {
        /* VS external interrupt comes from the selected guest file. */
        vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
        vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP;
        if (gein <= env->geilen && vseip) {
            siid[scount] = IRQ_S_EXT;
            siprio[scount] = IPRIO_MMAXIPRIO + 1;
            if (env->aia_ireg_rmw_fn[PRV_S]) {
                /*
                 * Call machine specific IMSIC register emulation for
                 * reading TOPEI.
                 */
                ret = env->aia_ireg_rmw_fn[PRV_S](
                    env->aia_ireg_rmw_fn_arg[PRV_S],
                    AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein,
                                  riscv_cpu_mxl_bits(env)),
                    &topei, 0, 0);
                if (!ret && topei) {
                    siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK;
                }
            }
            scount++;
        }
    } else {
        /* No guest file selected: hvictl may inject IRQ_S_EXT. */
        if (hviid == IRQ_S_EXT && hviprio) {
            siid[scount] = IRQ_S_EXT;
            siprio[scount] = hviprio;
            scount++;
        }
    }

    if (env->hvictl & HVICTL_VTI) {
        /* With VTI set, hvictl supplies the non-external candidate. */
        if (hviid != IRQ_S_EXT) {
            siid[scount] = hviid;
            siprio[scount] = hviprio;
            scount++;
        }
    } else {
        irq = riscv_cpu_vsirq_pending(env);
        if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) {
            siid[scount] = irq;
            siprio[scount] = env->hviprio[irq];
            scount++;
        }
    }

    /* Pick the candidate with the smallest priority number. */
    iid = 0;
    iprio = UINT_MAX;
    for (s = 0; s < scount; s++) {
        if (siprio[s] < iprio) {
            iid = siid[s];
            iprio = siprio[s];
        }
    }

    if (iid) {
        if (env->hvictl & HVICTL_IPRIOM) {
            if (iprio > IPRIO_MMAXIPRIO) {
                iprio = IPRIO_MMAXIPRIO;
            }
            if (!iprio) {
                if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) {
                    iprio = IPRIO_MMAXIPRIO;
                }
            }
        } else {
            /* IPRIOM clear: priority reads as 1. */
            iprio = 1;
        }
    } else {
        iprio = 0;
    }

    *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT;
    *val |= iprio;

    return RISCV_EXCP_NONE;
}
4530
/*
 * Read stopi (top S-level interrupt, AIA).
 *
 * In VS-mode this redirects to vstopi.  Otherwise the highest pending
 * S-level interrupt and its priority are packed as
 * (irq << TOPI_IID_SHIFT) | iprio, or zero when nothing is pending.
 */
static RISCVException read_stopi(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    int irq;
    uint8_t iprio;

    if (env->virt_enabled) {
        return read_vstopi(env, CSR_VSTOPI, val);
    }

    irq = riscv_cpu_sirq_pending(env);
    if (irq <= 0 || irq > 63) {
        *val = 0;
    } else {
        iprio = env->siprio[irq];
        if (!iprio) {
            /* Priority 0 with a low default priority saturates to max. */
            if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) {
                iprio = IPRIO_MMAXIPRIO;
            }
        }
        *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
        *val |= iprio;
    }

    return RISCV_EXCP_NONE;
}
4557
4558 /* Hypervisor Extensions */
read_hstatus(CPURISCVState * env,int csrno,target_ulong * val)4559 static RISCVException read_hstatus(CPURISCVState *env, int csrno,
4560 target_ulong *val)
4561 {
4562 *val = env->hstatus;
4563 if (riscv_cpu_mxl(env) != MXL_RV32) {
4564 /* We only support 64-bit VSXL */
4565 *val = set_field(*val, HSTATUS_VSXL, 2);
4566 }
4567 /* We only support little endian */
4568 *val = set_field(*val, HSTATUS_VSBE, 0);
4569 return RISCV_EXCP_NONE;
4570 }
4571
write_hstatus(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4572 static RISCVException write_hstatus(CPURISCVState *env, int csrno,
4573 target_ulong val, uintptr_t ra)
4574 {
4575 uint64_t mask = (target_ulong)-1;
4576 if (!env_archcpu(env)->cfg.ext_svukte) {
4577 mask &= ~HSTATUS_HUKTE;
4578 }
4579 /* Update PMM field only if the value is valid according to Zjpm v1.0 */
4580 if (!env_archcpu(env)->cfg.ext_ssnpm ||
4581 riscv_cpu_mxl(env) != MXL_RV64 ||
4582 get_field(val, HSTATUS_HUPMM) == PMM_FIELD_RESERVED) {
4583 mask &= ~HSTATUS_HUPMM;
4584 }
4585 env->hstatus = (env->hstatus & ~mask) | (val & mask);
4586
4587 if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
4588 qemu_log_mask(LOG_UNIMP,
4589 "QEMU does not support mixed HSXLEN options.");
4590 }
4591 if (get_field(val, HSTATUS_VSBE) != 0) {
4592 qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests.");
4593 }
4594 return RISCV_EXCP_NONE;
4595 }
4596
read_hedeleg(CPURISCVState * env,int csrno,target_ulong * val)4597 static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
4598 target_ulong *val)
4599 {
4600 *val = env->hedeleg;
4601 return RISCV_EXCP_NONE;
4602 }
4603
write_hedeleg(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4604 static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
4605 target_ulong val, uintptr_t ra)
4606 {
4607 env->hedeleg = val & vs_delegable_excps;
4608 return RISCV_EXCP_NONE;
4609 }
4610
read_hedelegh(CPURISCVState * env,int csrno,target_ulong * val)4611 static RISCVException read_hedelegh(CPURISCVState *env, int csrno,
4612 target_ulong *val)
4613 {
4614 RISCVException ret;
4615 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
4616 if (ret != RISCV_EXCP_NONE) {
4617 return ret;
4618 }
4619
4620 /* Reserved, now read zero */
4621 *val = 0;
4622 return RISCV_EXCP_NONE;
4623 }
4624
write_hedelegh(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4625 static RISCVException write_hedelegh(CPURISCVState *env, int csrno,
4626 target_ulong val, uintptr_t ra)
4627 {
4628 RISCVException ret;
4629 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
4630 if (ret != RISCV_EXCP_NONE) {
4631 return ret;
4632 }
4633
4634 /* Reserved, now write ignore */
4635 return RISCV_EXCP_NONE;
4636 }
4637
rmw_hvien64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)4638 static RISCVException rmw_hvien64(CPURISCVState *env, int csrno,
4639 uint64_t *ret_val,
4640 uint64_t new_val, uint64_t wr_mask)
4641 {
4642 uint64_t mask = wr_mask & hvien_writable_mask;
4643
4644 if (ret_val) {
4645 *ret_val = env->hvien;
4646 }
4647
4648 env->hvien = (env->hvien & ~mask) | (new_val & mask);
4649
4650 return RISCV_EXCP_NONE;
4651 }
4652
rmw_hvien(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4653 static RISCVException rmw_hvien(CPURISCVState *env, int csrno,
4654 target_ulong *ret_val,
4655 target_ulong new_val, target_ulong wr_mask)
4656 {
4657 uint64_t rval;
4658 RISCVException ret;
4659
4660 ret = rmw_hvien64(env, csrno, &rval, new_val, wr_mask);
4661 if (ret_val) {
4662 *ret_val = rval;
4663 }
4664
4665 return ret;
4666 }
4667
rmw_hvienh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4668 static RISCVException rmw_hvienh(CPURISCVState *env, int csrno,
4669 target_ulong *ret_val,
4670 target_ulong new_val, target_ulong wr_mask)
4671 {
4672 uint64_t rval;
4673 RISCVException ret;
4674
4675 ret = rmw_hvien64(env, csrno, &rval,
4676 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4677 if (ret_val) {
4678 *ret_val = rval >> 32;
4679 }
4680
4681 return ret;
4682 }
4683
rmw_hideleg64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)4684 static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno,
4685 uint64_t *ret_val,
4686 uint64_t new_val, uint64_t wr_mask)
4687 {
4688 uint64_t mask = wr_mask & vs_delegable_ints;
4689
4690 if (ret_val) {
4691 *ret_val = env->hideleg & vs_delegable_ints;
4692 }
4693
4694 env->hideleg = (env->hideleg & ~mask) | (new_val & mask);
4695 return RISCV_EXCP_NONE;
4696 }
4697
rmw_hideleg(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4698 static RISCVException rmw_hideleg(CPURISCVState *env, int csrno,
4699 target_ulong *ret_val,
4700 target_ulong new_val, target_ulong wr_mask)
4701 {
4702 uint64_t rval;
4703 RISCVException ret;
4704
4705 ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask);
4706 if (ret_val) {
4707 *ret_val = rval;
4708 }
4709
4710 return ret;
4711 }
4712
rmw_hidelegh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4713 static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno,
4714 target_ulong *ret_val,
4715 target_ulong new_val, target_ulong wr_mask)
4716 {
4717 uint64_t rval;
4718 RISCVException ret;
4719
4720 ret = rmw_hideleg64(env, csrno, &rval,
4721 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4722 if (ret_val) {
4723 *ret_val = rval >> 32;
4724 }
4725
4726 return ret;
4727 }
4728
/*
 * The function is written for two use-cases:
 * 1- To access hvip csr as is for HS-mode access.
 * 2- To access vsip as a combination of hvip, and mip for vs-mode.
 *
 * Both report bits 2, 6, 10 and 13:63.
 * vsip needs to be read-only zero when both hideleg[i] and
 * hvien[i] are zero.
 */
static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t old_hvip;
    uint64_t ret_mip;

    /*
     * For bits 10, 6 and 2, vsip[i] is an alias of hip[i]. These bits are
     * present in hip, hvip and mip. Where mip[i] is alias of hip[i] and hvip[i]
     * is OR'ed in hip[i] to inject virtual interrupts from hypervisor. These
     * bits are actually being maintained in mip so we read them from there.
     * This way we have a single source of truth and allows for easier
     * implementation.
     *
     * For bits 13:63 we have:
     *
     * hideleg[i]  hvien[i]
     *   0          0      No delegation. vsip[i] readonly zero.
     *   0          1      vsip[i] is alias of hvip[i], sip bypassed.
     *   1          X      vsip[i] is alias of sip[i], hvip bypassed.
     *
     * alias_mask denotes the bits that come from sip (mip here given we
     * maintain all bits there). nalias_mask denotes bits that come from
     * hvip.
     */
    uint64_t alias_mask = (env->hideleg | ~env->hvien) | VS_MODE_INTERRUPTS;
    uint64_t nalias_mask = (~env->hideleg & env->hvien);
    uint64_t wr_mask_hvip;
    uint64_t wr_mask_mip;

    /*
     * Both alias and non-alias mask remain same for vsip except:
     * 1- For VS* bits if they are zero in hideleg.
     * 2- For 13:63 bits if they are zero in both hideleg and hvien.
     */
    if (csrno == CSR_VSIP) {
        /* zero-out VS* bits that are not delegated to VS mode. */
        alias_mask &= (env->hideleg | ~VS_MODE_INTERRUPTS);

        /*
         * zero-out 13:63 bits that are zero in both hideleg and hvien.
         * nalias_mask mask can not contain any VS* bits so only second
         * condition applies on it.
         */
        nalias_mask &= (env->hideleg | env->hvien);
        alias_mask &= (env->hideleg | env->hvien);
    }

    /* Split the write between the hvip store and the mip alias. */
    wr_mask_hvip = wr_mask & nalias_mask & hvip_writable_mask;
    wr_mask_mip = wr_mask & alias_mask & hvip_writable_mask;

    /* Aliased bits, bits 10, 6, 2 need to come from mip. */
    ret = rmw_mip64(env, csrno, &ret_mip, new_val, wr_mask_mip);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* Snapshot hvip before the write so the read-back is the old value. */
    old_hvip = env->hvip;

    if (wr_mask_hvip) {
        env->hvip = (env->hvip & ~wr_mask_hvip) | (new_val & wr_mask_hvip);

        /*
         * Given hvip is separate source from mip, we need to trigger interrupt
         * from here separately. Normally this happen from riscv_cpu_update_mip.
         */
        riscv_cpu_interrupt(env);
    }

    if (ret_val) {
        /* Only take VS* bits from mip. */
        ret_mip &= alias_mask;

        /* Take in non-delegated 13:63 bits from hvip. */
        old_hvip &= nalias_mask;

        *ret_val = ret_mip | old_hvip;
    }

    return ret;
}
4821
rmw_hvip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4822 static RISCVException rmw_hvip(CPURISCVState *env, int csrno,
4823 target_ulong *ret_val,
4824 target_ulong new_val, target_ulong wr_mask)
4825 {
4826 uint64_t rval;
4827 RISCVException ret;
4828
4829 ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask);
4830 if (ret_val) {
4831 *ret_val = rval;
4832 }
4833
4834 return ret;
4835 }
4836
rmw_hviph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4837 static RISCVException rmw_hviph(CPURISCVState *env, int csrno,
4838 target_ulong *ret_val,
4839 target_ulong new_val, target_ulong wr_mask)
4840 {
4841 uint64_t rval;
4842 RISCVException ret;
4843
4844 ret = rmw_hvip64(env, csrno, &rval,
4845 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4846 if (ret_val) {
4847 *ret_val = rval >> 32;
4848 }
4849
4850 return ret;
4851 }
4852
rmw_hip(CPURISCVState * env,int csrno,target_ulong * ret_value,target_ulong new_value,target_ulong write_mask)4853 static RISCVException rmw_hip(CPURISCVState *env, int csrno,
4854 target_ulong *ret_value,
4855 target_ulong new_value, target_ulong write_mask)
4856 {
4857 int ret = rmw_mip(env, csrno, ret_value, new_value,
4858 write_mask & hip_writable_mask);
4859
4860 if (ret_value) {
4861 *ret_value &= HS_MODE_INTERRUPTS;
4862 }
4863 return ret;
4864 }
4865
rmw_hie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4866 static RISCVException rmw_hie(CPURISCVState *env, int csrno,
4867 target_ulong *ret_val,
4868 target_ulong new_val, target_ulong wr_mask)
4869 {
4870 uint64_t rval;
4871 RISCVException ret;
4872
4873 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS);
4874 if (ret_val) {
4875 *ret_val = rval & HS_MODE_INTERRUPTS;
4876 }
4877
4878 return ret;
4879 }
4880
read_hcounteren(CPURISCVState * env,int csrno,target_ulong * val)4881 static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
4882 target_ulong *val)
4883 {
4884 *val = env->hcounteren;
4885 return RISCV_EXCP_NONE;
4886 }
4887
write_hcounteren(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4888 static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
4889 target_ulong val, uintptr_t ra)
4890 {
4891 RISCVCPU *cpu = env_archcpu(env);
4892
4893 /* WARL register - disable unavailable counters */
4894 env->hcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
4895 COUNTEREN_IR);
4896 return RISCV_EXCP_NONE;
4897 }
4898
read_hgeie(CPURISCVState * env,int csrno,target_ulong * val)4899 static RISCVException read_hgeie(CPURISCVState *env, int csrno,
4900 target_ulong *val)
4901 {
4902 if (val) {
4903 *val = env->hgeie;
4904 }
4905 return RISCV_EXCP_NONE;
4906 }
4907
write_hgeie(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4908 static RISCVException write_hgeie(CPURISCVState *env, int csrno,
4909 target_ulong val, uintptr_t ra)
4910 {
4911 /* Only GEILEN:1 bits implemented and BIT0 is never implemented */
4912 val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
4913 env->hgeie = val;
4914 /* Update mip.SGEIP bit */
4915 riscv_cpu_update_mip(env, MIP_SGEIP,
4916 BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
4917 return RISCV_EXCP_NONE;
4918 }
4919
read_htval(CPURISCVState * env,int csrno,target_ulong * val)4920 static RISCVException read_htval(CPURISCVState *env, int csrno,
4921 target_ulong *val)
4922 {
4923 *val = env->htval;
4924 return RISCV_EXCP_NONE;
4925 }
4926
write_htval(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4927 static RISCVException write_htval(CPURISCVState *env, int csrno,
4928 target_ulong val, uintptr_t ra)
4929 {
4930 env->htval = val;
4931 return RISCV_EXCP_NONE;
4932 }
4933
read_htinst(CPURISCVState * env,int csrno,target_ulong * val)4934 static RISCVException read_htinst(CPURISCVState *env, int csrno,
4935 target_ulong *val)
4936 {
4937 *val = env->htinst;
4938 return RISCV_EXCP_NONE;
4939 }
4940
/* Software writes to htinst are ignored here; the stored value is
 * left untouched. */
static RISCVException write_htinst(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    return RISCV_EXCP_NONE;
}
4946
read_hgeip(CPURISCVState * env,int csrno,target_ulong * val)4947 static RISCVException read_hgeip(CPURISCVState *env, int csrno,
4948 target_ulong *val)
4949 {
4950 if (val) {
4951 *val = env->hgeip;
4952 }
4953 return RISCV_EXCP_NONE;
4954 }
4955
read_hgatp(CPURISCVState * env,int csrno,target_ulong * val)4956 static RISCVException read_hgatp(CPURISCVState *env, int csrno,
4957 target_ulong *val)
4958 {
4959 *val = env->hgatp;
4960 return RISCV_EXCP_NONE;
4961 }
4962
write_hgatp(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4963 static RISCVException write_hgatp(CPURISCVState *env, int csrno,
4964 target_ulong val, uintptr_t ra)
4965 {
4966 env->hgatp = legalize_xatp(env, env->hgatp, val);
4967 return RISCV_EXCP_NONE;
4968 }
4969
read_htimedelta(CPURISCVState * env,int csrno,target_ulong * val)4970 static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
4971 target_ulong *val)
4972 {
4973 if (!env->rdtime_fn) {
4974 return RISCV_EXCP_ILLEGAL_INST;
4975 }
4976
4977 *val = env->htimedelta;
4978 return RISCV_EXCP_NONE;
4979 }
4980
write_htimedelta(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4981 static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
4982 target_ulong val, uintptr_t ra)
4983 {
4984 if (!env->rdtime_fn) {
4985 return RISCV_EXCP_ILLEGAL_INST;
4986 }
4987
4988 if (riscv_cpu_mxl(env) == MXL_RV32) {
4989 env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
4990 } else {
4991 env->htimedelta = val;
4992 }
4993
4994 if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) {
4995 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
4996 env->htimedelta, MIP_VSTIP);
4997 }
4998
4999 return RISCV_EXCP_NONE;
5000 }
5001
read_htimedeltah(CPURISCVState * env,int csrno,target_ulong * val)5002 static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
5003 target_ulong *val)
5004 {
5005 if (!env->rdtime_fn) {
5006 return RISCV_EXCP_ILLEGAL_INST;
5007 }
5008
5009 *val = env->htimedelta >> 32;
5010 return RISCV_EXCP_NONE;
5011 }
5012
write_htimedeltah(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)5013 static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
5014 target_ulong val, uintptr_t ra)
5015 {
5016 if (!env->rdtime_fn) {
5017 return RISCV_EXCP_ILLEGAL_INST;
5018 }
5019
5020 env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);
5021
5022 if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) {
5023 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
5024 env->htimedelta, MIP_VSTIP);
5025 }
5026
5027 return RISCV_EXCP_NONE;
5028 }
5029
/* Read hvictl (hypervisor virtual interrupt control, AIA). */
static RISCVException read_hvictl(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->hvictl;
    return RISCV_EXCP_NONE;
}

/* Write hvictl; bits outside HVICTL_VALID_MASK are discarded. */
static RISCVException write_hvictl(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    env->hvictl = val & HVICTL_VALID_MASK;
    return RISCV_EXCP_NONE;
}
5043
/*
 * Common helper for the hviprio* CSR reads: packs one priority byte per
 * interrupt into @val, starting at index @first_index of the @iprio array.
 * Each register holds 4 bytes per 32 bits of MXLEN (4 irqs on RV32,
 * 8 on RV64).
 */
static RISCVException read_hvipriox(CPURISCVState *env, int first_index,
                                    uint8_t *iprio, target_ulong *val)
{
    int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);

    /* First index has to be a multiple of number of irqs per register */
    if (first_index % num_irqs) {
        return (env->virt_enabled) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }

    /* Fill-up return value */
    *val = 0;
    for (i = 0; i < num_irqs; i++) {
        /* Skip slots that do not map to an irq or that read as zero. */
        if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
            continue;
        }
        if (rdzero) {
            continue;
        }
        *val |= ((target_ulong)iprio[irq]) << (i * 8);
    }

    return RISCV_EXCP_NONE;
}
5069
/*
 * Common helper for the hviprio* CSR writes: unpacks one priority byte per
 * interrupt from @val into the @iprio array, starting at @first_index.
 * Mirrors read_hvipriox(); read-as-zero slots are forced to 0.
 */
static RISCVException write_hvipriox(CPURISCVState *env, int first_index,
                                     uint8_t *iprio, target_ulong val)
{
    int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);

    /* First index has to be a multiple of number of irqs per register */
    if (first_index % num_irqs) {
        return (env->virt_enabled) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }

    /* Fill-up priority array */
    for (i = 0; i < num_irqs; i++) {
        if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
            continue;
        }
        if (rdzero) {
            iprio[irq] = 0;
        } else {
            iprio[irq] = (val >> (i * 8)) & 0xff;
        }
    }

    return RISCV_EXCP_NONE;
}
5095
/*
 * hviprio1/1h/2/2h accessors: thin wrappers around read/write_hvipriox()
 * that select the starting index into env->hviprio for each register.
 */
static RISCVException read_hviprio1(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    return read_hvipriox(env, 0, env->hviprio, val);
}

static RISCVException write_hviprio1(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    return write_hvipriox(env, 0, env->hviprio, val);
}

static RISCVException read_hviprio1h(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    return read_hvipriox(env, 4, env->hviprio, val);
}

static RISCVException write_hviprio1h(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    return write_hvipriox(env, 4, env->hviprio, val);
}

static RISCVException read_hviprio2(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    return read_hvipriox(env, 8, env->hviprio, val);
}

static RISCVException write_hviprio2(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    return write_hvipriox(env, 8, env->hviprio, val);
}

static RISCVException read_hviprio2h(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    return read_hvipriox(env, 12, env->hviprio, val);
}

static RISCVException write_hviprio2h(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    return write_hvipriox(env, 12, env->hviprio, val);
}
5143
5144 /* Virtual CSR Registers */
/* Read vsstatus (virtual supervisor status). */
static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->vsstatus;
    return RISCV_EXCP_NONE;
}

/*
 * Write vsstatus. A write that would clear the UXL field is ignored for
 * that field (the previous UXL is retained). With henvcfg.DTE set, setting
 * SDT also clears SIE (Ssdbltrp behaviour); with DTE clear, SDT itself is
 * masked out of the written value.
 */
static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    uint64_t mask = (target_ulong)-1;
    /* Preserve the old UXL when the write supplies an (illegal) zero UXL. */
    if ((val & VSSTATUS64_UXL) == 0) {
        mask &= ~VSSTATUS64_UXL;
    }
    if ((env->henvcfg & HENVCFG_DTE)) {
        if ((val & SSTATUS_SDT) != 0) {
            val &= ~SSTATUS_SIE;
        }
    } else {
        val &= ~SSTATUS_SDT;
    }
    env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
    return RISCV_EXCP_NONE;
}
5169
/* Read vstvec (virtual supervisor trap vector base address). */
static RISCVException read_vstvec(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstvec;
    return RISCV_EXCP_NONE;
}

/*
 * Write vstvec. Writes that select a reserved mode are dropped, keeping
 * the previous value.
 */
static RISCVException write_vstvec(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
    if ((val & 3) < 2) {
        env->vstvec = val;
    } else {
        qemu_log_mask(LOG_UNIMP, "CSR_VSTVEC: reserved mode not supported\n");
    }
    return RISCV_EXCP_NONE;
}
5188
/* Plain read/write accessors for vsscratch and vsepc. */
static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->vsscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    env->vsscratch = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vsepc(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vsepc;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsepc(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
    env->vsepc = val;
    return RISCV_EXCP_NONE;
}
5216
/* Plain read/write accessors for vscause and vstval. */
static RISCVException read_vscause(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->vscause;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vscause(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    env->vscause = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vstval(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstval;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vstval(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    env->vstval = val;
    return RISCV_EXCP_NONE;
}
5244
/* Read vsatp (virtual supervisor address translation and protection). */
static RISCVException read_vsatp(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vsatp;
    return RISCV_EXCP_NONE;
}

/* Write vsatp; the value is legalized the same way as satp/hgatp. */
static RISCVException write_vsatp(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
    env->vsatp = legalize_xatp(env, env->vsatp, val);
    return RISCV_EXCP_NONE;
}
5258
/* Plain read/write accessors for mtval2 and mtinst (hypervisor traps). */
static RISCVException read_mtval2(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mtval2;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtval2(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    env->mtval2 = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mtinst(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mtinst;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtinst(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    env->mtinst = val;
    return RISCV_EXCP_NONE;
}
5286
5287 /* Physical Memory Protection */
/* mseccfg: delegate to the PMP/ePMP helpers in pmp.c. */
static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = mseccfg_csr_read(env);
    return RISCV_EXCP_NONE;
}

static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    mseccfg_csr_write(env, val);
    return RISCV_EXCP_NONE;
}

/* pmpcfg0..N: the register index is derived from the CSR number. */
static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    uint32_t reg_index = csrno - CSR_PMPCFG0;

    *val = pmpcfg_csr_read(env, reg_index);
    return RISCV_EXCP_NONE;
}

static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    uint32_t reg_index = csrno - CSR_PMPCFG0;

    pmpcfg_csr_write(env, reg_index, val);
    return RISCV_EXCP_NONE;
}

/* pmpaddr0..N: likewise indexed by CSR number. */
static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
    return RISCV_EXCP_NONE;
}

static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
    return RISCV_EXCP_NONE;
}
5333
/* tselect (debug trigger select): delegate to the debug helpers. */
static RISCVException read_tselect(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = tselect_csr_read(env);
    return RISCV_EXCP_NONE;
}

static RISCVException write_tselect(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    tselect_csr_write(env, val);
    return RISCV_EXCP_NONE;
}
5347
/*
 * Read tdata1/tdata2/tdata3 for the currently selected trigger.
 * The CSR number selects which tdata register (csrno - CSR_TDATA1).
 */
static RISCVException read_tdata(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    /* return 0 in tdata1 to end the trigger enumeration */
    if (env->trigger_cur >= RV_MAX_TRIGGERS && csrno == CSR_TDATA1) {
        *val = 0;
        return RISCV_EXCP_NONE;
    }

    if (!tdata_available(env, csrno - CSR_TDATA1)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = tdata_csr_read(env, csrno - CSR_TDATA1);
    return RISCV_EXCP_NONE;
}
5364
/* Write tdata1/tdata2/tdata3 for the currently selected trigger. */
static RISCVException write_tdata(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
    if (!tdata_available(env, csrno - CSR_TDATA1)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    tdata_csr_write(env, csrno - CSR_TDATA1, val);
    return RISCV_EXCP_NONE;
}
5375
/* Read tinfo (supported trigger types for the selected trigger). */
static RISCVException read_tinfo(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = tinfo_csr_read(env);
    return RISCV_EXCP_NONE;
}

/* Read mcontext (machine context, debug extension). */
static RISCVException read_mcontext(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mcontext;
    return RISCV_EXCP_NONE;
}
5389
write_mcontext(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)5390 static RISCVException write_mcontext(CPURISCVState *env, int csrno,
5391 target_ulong val, uintptr_t ra)
5392 {
5393 bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;
5394 int32_t mask;
5395
5396 if (riscv_has_ext(env, RVH)) {
5397 /* Spec suggest 7-bit for RV32 and 14-bit for RV64 w/ H extension */
5398 mask = rv32 ? MCONTEXT32_HCONTEXT : MCONTEXT64_HCONTEXT;
5399 } else {
5400 /* Spec suggest 6-bit for RV32 and 13-bit for RV64 w/o H extension */
5401 mask = rv32 ? MCONTEXT32 : MCONTEXT64;
5402 }
5403
5404 env->mcontext = val & mask;
5405 return RISCV_EXCP_NONE;
5406 }
5407
/* Plain accessors for the Smrnmi CSRs mnscratch, mnepc and mncause. */
static RISCVException read_mnscratch(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->mnscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mnscratch(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    env->mnscratch = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mnepc(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->mnepc;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mnepc(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
    env->mnepc = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mncause(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mncause;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mncause(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    env->mncause = val;
    return RISCV_EXCP_NONE;
}
5449
/* Read mnstatus (Smrnmi resumable NMI status). */
static RISCVException read_mnstatus(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mnstatus;
    return RISCV_EXCP_NONE;
}

/*
 * Write mnstatus. NMIE and MNPP are always writable; MNPV is writable
 * only with the H extension, and toggling it requires a TLB flush since
 * it affects address translation.
 */
static RISCVException write_mnstatus(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    target_ulong mask = (MNSTATUS_NMIE | MNSTATUS_MNPP);

    if (riscv_has_ext(env, RVH)) {
        /* Flush tlb on mnstatus fields that affect VM. */
        if ((val ^ env->mnstatus) & MNSTATUS_MNPV) {
            tlb_flush(env_cpu(env));
        }

        mask |= MNSTATUS_MNPV;
    }

    /*
     * mnstatus.NMIE can only be cleared by hardware: ORing the current
     * NMIE bit back in lets software set it but never clear it.
     */
    env->mnstatus = (env->mnstatus & MNSTATUS_NMIE) | (val & mask);
    return RISCV_EXCP_NONE;
}
5475
5476 #endif
5477
5478 /* Crypto Extension */
/*
 * Produce a new value for the Zkr 'seed' CSR: 16 bits of guest-visible
 * entropy tagged with an operational status in the upper bits.
 *
 * @new_value / @write_mask are currently unused but keep the signature
 * aligned with the rmw callback that invokes this helper.
 *
 * Returns SEED_OPST_ES16 | entropy on success, SEED_OPST_DEAD when the
 * crypto subsystem cannot supply randomness.
 */
target_ulong riscv_new_csr_seed(target_ulong new_value,
                                target_ulong write_mask)
{
    uint16_t random_v;
    Error *random_e = NULL;
    int random_r;
    target_ulong rval;

    /* sizeof instead of a magic '2' keeps this in sync with random_v. */
    random_r = qemu_guest_getrandom(&random_v, sizeof(random_v), &random_e);
    if (unlikely(random_r < 0)) {
        /*
         * Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return a
         * failure indication to the guest.  There is no reason
         * we know to expect the failure to be transitory, so
         * indicate DEAD to avoid having the guest spin on WAIT.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s\n",
                      __func__, error_get_pretty(random_e));
        error_free(random_e);
        rval = SEED_OPST_DEAD;
    } else {
        rval = random_v | SEED_OPST_ES16;
    }

    return rval;
}
5506
/*
 * Read-modify-write handler for the 'seed' CSR. Every access generates a
 * fresh value via riscv_new_csr_seed(); the old value is returned when the
 * caller wants it (ret_value may be NULL for write-only accesses).
 */
static RISCVException rmw_seed(CPURISCVState *env, int csrno,
                               target_ulong *ret_value,
                               target_ulong new_value,
                               target_ulong write_mask)
{
    target_ulong rval;

    rval = riscv_new_csr_seed(new_value, write_mask);

    if (ret_value) {
        *ret_value = rval;
    }

    return RISCV_EXCP_NONE;
}
5522
5523 /*
5524 * riscv_csrrw - read and/or update control and status register
5525 *
5526 * csrr <-> riscv_csrrw(env, csrno, ret_value, 0, 0);
5527 * csrrw <-> riscv_csrrw(env, csrno, ret_value, value, -1);
5528 * csrrs <-> riscv_csrrw(env, csrno, ret_value, -1, value);
5529 * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value);
5530 */
5531
/*
 * Validate a CSR access before it is performed: Zicsr availability,
 * CSR existence, privileged-spec version, read-only violation, the CSR's
 * own predicate, and finally the privilege-level check. The ordering of
 * these checks is deliberate (see the comment before the predicate call).
 */
static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
                                               int csrno,
                                               bool write)
{
    /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
    /* CSR bits [11:10] == 0b11 marks the CSR as read-only per the spec. */
    bool read_only = get_field(csrno, 0xC00) == 3;
    int csr_min_priv = csr_ops[csrno].min_priv_ver;

    /* ensure the CSR extension is enabled */
    if (!riscv_cpu_cfg(env)->ext_zicsr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* ensure CSR is implemented by checking predicate */
    if (!csr_ops[csrno].predicate) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* privileged spec version check */
    if (env->priv_ver < csr_min_priv) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* read / write check */
    if (write && read_only) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /*
     * The predicate() not only does existence check but also does some
     * access control check which triggers for example virtual instruction
     * exception in some cases. When writing read-only CSRs in those cases
     * illegal instruction exception should be triggered instead of virtual
     * instruction exception. Hence this comes after the read / write check.
     */
    RISCVException ret = csr_ops[csrno].predicate(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

#if !defined(CONFIG_USER_ONLY)
    int csr_priv, effective_priv = env->priv;

    if (riscv_has_ext(env, RVH) && env->priv == PRV_S &&
        !env->virt_enabled) {
        /*
         * We are in HS mode. Add 1 to the effective privilege level to
         * allow us to access the Hypervisor CSRs.
         */
        effective_priv++;
    }

    /* CSR bits [9:8] encode the minimum privilege required to access it. */
    csr_priv = get_field(csrno, 0x300);
    if (!env->debugger && (effective_priv < csr_priv)) {
        /* S/HS-level CSRs accessed from a virtualized mode trap to HS. */
        if (csr_priv <= (PRV_S + 1) && env->virt_enabled) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        return RISCV_EXCP_ILLEGAL_INST;
    }
#endif
    return RISCV_EXCP_NONE;
}
5594
/*
 * Perform a (64-bit-or-narrower) CSR read-modify-write after the access
 * has already been validated. Dispatches to a combined rmw op when the
 * CSR provides one, otherwise composes separate read and write callbacks.
 * @ra is the translation-block return address for exception unwinding.
 */
static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
                                       target_ulong *ret_value,
                                       target_ulong new_value,
                                       target_ulong write_mask,
                                       uintptr_t ra)
{
    RISCVException ret;
    target_ulong old_value = 0;

    /* execute combined read/write operation if it exists */
    if (csr_ops[csrno].op) {
        return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
    }

    /*
     * ret_value == NULL means that rd=x0 and we're coming from helper_csrw()
     * and we can't throw side effects caused by CSR reads.
     */
    if (ret_value) {
        /* if no accessor exists then return failure */
        if (!csr_ops[csrno].read) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
        /* read old value */
        ret = csr_ops[csrno].read(env, csrno, &old_value);
        if (ret != RISCV_EXCP_NONE) {
            return ret;
        }
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (write_mask) {
        new_value = (old_value & ~write_mask) | (new_value & write_mask);
        if (csr_ops[csrno].write) {
            ret = csr_ops[csrno].write(env, csrno, new_value, ra);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}
5643
/* Pure CSR read (csrr): validate the access, then read with no write. */
RISCVException riscv_csrr(CPURISCVState *env, int csrno,
                          target_ulong *ret_value)
{
    RISCVException ret = riscv_csrrw_check(env, csrno, false);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    return riscv_csrrw_do64(env, csrno, ret_value, 0, 0, 0);
}

/*
 * CSR read-modify-write entry point (csrrw/csrrs/csrrc): validate as a
 * write access, then perform the combined operation.
 */
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value, target_ulong new_value,
                           target_ulong write_mask, uintptr_t ra)
{
    RISCVException ret = riscv_csrrw_check(env, csrno, true);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask, ra);
}
5666
/*
 * 128-bit variant of riscv_csrrw_do64 for RV128. Requires a read128
 * callback; if no write128 exists the low 64 bits are written through the
 * ordinary write callback to avoid wrappers for every register.
 */
static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
                                        Int128 *ret_value,
                                        Int128 new_value,
                                        Int128 write_mask, uintptr_t ra)
{
    RISCVException ret;
    Int128 old_value;

    /* read old value */
    ret = csr_ops[csrno].read128(env, csrno, &old_value);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (int128_nz(write_mask)) {
        new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
                              int128_and(new_value, write_mask));
        if (csr_ops[csrno].write128) {
            ret = csr_ops[csrno].write128(env, csrno, new_value);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        } else if (csr_ops[csrno].write) {
            /* avoids having to write wrappers for all registers */
            ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value), ra);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}
5706
/*
 * 128-bit pure CSR read. Uses the 128-bit path when available, otherwise
 * falls back to the 64-bit accessor and zero-extends the result.
 */
RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
                               Int128 *ret_value)
{
    RISCVException ret;

    ret = riscv_csrrw_check(env, csrno, false);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (csr_ops[csrno].read128) {
        return riscv_csrrw_do128(env, csrno, ret_value,
                                 int128_zero(), int128_zero(), 0);
    }

    /*
     * Fall back to 64-bit version for now, if the 128-bit alternative isn't
     * at all defined.
     * Note, some CSRs don't need to extend to MXLEN (64 upper bits non
     * significant), for those, this fallback is correctly handling the
     * accesses
     */
    target_ulong old_value;
    ret = riscv_csrrw_do64(env, csrno, &old_value, 0, 0, 0);
    if (ret == RISCV_EXCP_NONE && ret_value) {
        *ret_value = int128_make64(old_value);
    }
    return ret;
}
5736
/*
 * 128-bit CSR read-modify-write. Uses the 128-bit path when available,
 * otherwise operates on the low 64 bits via the 64-bit accessor.
 */
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value, Int128 new_value,
                                Int128 write_mask, uintptr_t ra)
{
    RISCVException ret;

    ret = riscv_csrrw_check(env, csrno, true);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (csr_ops[csrno].read128) {
        return riscv_csrrw_do128(env, csrno, ret_value,
                                 new_value, write_mask, ra);
    }

    /*
     * Fall back to 64-bit version for now, if the 128-bit alternative isn't
     * at all defined.
     * Note, some CSRs don't need to extend to MXLEN (64 upper bits non
     * significant), for those, this fallback is correctly handling the
     * accesses
     */
    target_ulong old_value;
    ret = riscv_csrrw_do64(env, csrno, &old_value,
                           int128_getlo(new_value),
                           int128_getlo(write_mask), ra);
    if (ret == RISCV_EXCP_NONE && ret_value) {
        *ret_value = int128_make64(old_value);
    }
    return ret;
}
5769
5770 /*
5771 * Debugger support. If not in user mode, set env->debugger before the
5772 * riscv_csrrw call and clear it after the call.
5773 */
/*
 * CSR access on behalf of the debugger (gdbstub). env->debugger is set
 * around the access so privilege checks in riscv_csrrw_check are bypassed.
 * A zero write_mask is treated as a pure read.
 */
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask)
{
    RISCVException ret;
#if !defined(CONFIG_USER_ONLY)
    env->debugger = true;
#endif
    if (!write_mask) {
        ret = riscv_csrr(env, csrno, ret_value);
    } else {
        /* ra == 0: no guest PC to unwind to for a debugger access. */
        ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask, 0);
    }
#if !defined(CONFIG_USER_ONLY)
    env->debugger = false;
#endif
    return ret;
}
5793
/* Plain read/write accessors for jvt (Zcmt table jump base). */
static RISCVException read_jvt(CPURISCVState *env, int csrno,
                               target_ulong *val)
{
    *val = env->jvt;
    return RISCV_EXCP_NONE;
}

static RISCVException write_jvt(CPURISCVState *env, int csrno,
                                target_ulong val, uintptr_t ra)
{
    env->jvt = val;
    return RISCV_EXCP_NONE;
}
5807
5808 /*
5809 * Control and Status Register function table
5810 * riscv_csr_operations::predicate() must be provided for an implemented CSR
5811 */
5812 riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
5813 /* User Floating-Point CSRs */
5814 [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
5815 [CSR_FRM] = { "frm", fs, read_frm, write_frm },
5816 [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr },
5817 /* Vector CSRs */
5818 [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart },
5819 [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat },
5820 [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm },
5821 [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr },
5822 [CSR_VL] = { "vl", vs, read_vl },
5823 [CSR_VTYPE] = { "vtype", vs, read_vtype },
5824 [CSR_VLENB] = { "vlenb", vs, read_vlenb },
5825 /* User Timers and Counters */
5826 [CSR_CYCLE] = { "cycle", ctr, read_hpmcounter },
5827 [CSR_INSTRET] = { "instret", ctr, read_hpmcounter },
5828 [CSR_CYCLEH] = { "cycleh", ctr32, read_hpmcounterh },
5829 [CSR_INSTRETH] = { "instreth", ctr32, read_hpmcounterh },
5830
5831 /*
5832 * In privileged mode, the monitor will have to emulate TIME CSRs only if
5833 * rdtime callback is not provided by machine/platform emulation.
5834 */
5835 [CSR_TIME] = { "time", ctr, read_time },
5836 [CSR_TIMEH] = { "timeh", ctr32, read_timeh },
5837
5838 /* Crypto Extension */
5839 [CSR_SEED] = { "seed", seed, NULL, NULL, rmw_seed },
5840
5841 /* Zcmt Extension */
5842 [CSR_JVT] = {"jvt", zcmt, read_jvt, write_jvt},
5843
5844 /* zicfiss Extension, shadow stack register */
5845 [CSR_SSP] = { "ssp", cfi_ss, read_ssp, write_ssp },
5846
5847 #if !defined(CONFIG_USER_ONLY)
5848 /* Machine Timers and Counters */
5849 [CSR_MCYCLE] = { "mcycle", any, read_hpmcounter,
5850 write_mhpmcounter },
5851 [CSR_MINSTRET] = { "minstret", any, read_hpmcounter,
5852 write_mhpmcounter },
5853 [CSR_MCYCLEH] = { "mcycleh", any32, read_hpmcounterh,
5854 write_mhpmcounterh },
5855 [CSR_MINSTRETH] = { "minstreth", any32, read_hpmcounterh,
5856 write_mhpmcounterh },
5857
5858 /* Machine Information Registers */
5859 [CSR_MVENDORID] = { "mvendorid", any, read_mvendorid },
5860 [CSR_MARCHID] = { "marchid", any, read_marchid },
5861 [CSR_MIMPID] = { "mimpid", any, read_mimpid },
5862 [CSR_MHARTID] = { "mhartid", any, read_mhartid },
5863
5864 [CSR_MCONFIGPTR] = { "mconfigptr", any, read_zero,
5865 .min_priv_ver = PRIV_VERSION_1_12_0 },
5866 /* Machine Trap Setup */
5867 [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus,
5868 NULL, read_mstatus_i128 },
5869 [CSR_MISA] = { "misa", any, read_misa, write_misa,
5870 NULL, read_misa_i128 },
5871 [CSR_MIDELEG] = { "mideleg", smode, NULL, NULL, rmw_mideleg },
5872 [CSR_MEDELEG] = { "medeleg", smode, read_medeleg, write_medeleg },
5873 [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie },
5874 [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec },
5875 [CSR_MCOUNTEREN] = { "mcounteren", umode, read_mcounteren,
5876 write_mcounteren },
5877
5878 [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush,
5879 write_mstatush },
5880 [CSR_MEDELEGH] = { "medelegh", smode32, read_zero, write_ignore,
5881 .min_priv_ver = PRIV_VERSION_1_13_0 },
5882 [CSR_HEDELEGH] = { "hedelegh", hmode32, read_hedelegh, write_hedelegh,
5883 .min_priv_ver = PRIV_VERSION_1_13_0 },
5884
5885 /* Machine Trap Handling */
5886 [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch,
5887 NULL, read_mscratch_i128, write_mscratch_i128 },
5888 [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc },
5889 [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause },
5890 [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval },
5891 [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
5892
5893 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */
5894 [CSR_MISELECT] = { "miselect", csrind_or_aia_any, NULL, NULL,
5895 rmw_xiselect },
5896 [CSR_MIREG] = { "mireg", csrind_or_aia_any, NULL, NULL,
5897 rmw_xireg },
5898
5899 /* Machine Indirect Register Alias */
5900 [CSR_MIREG2] = { "mireg2", csrind_any, NULL, NULL, rmw_xiregi,
5901 .min_priv_ver = PRIV_VERSION_1_12_0 },
5902 [CSR_MIREG3] = { "mireg3", csrind_any, NULL, NULL, rmw_xiregi,
5903 .min_priv_ver = PRIV_VERSION_1_12_0 },
5904 [CSR_MIREG4] = { "mireg4", csrind_any, NULL, NULL, rmw_xiregi,
5905 .min_priv_ver = PRIV_VERSION_1_12_0 },
5906 [CSR_MIREG5] = { "mireg5", csrind_any, NULL, NULL, rmw_xiregi,
5907 .min_priv_ver = PRIV_VERSION_1_12_0 },
5908 [CSR_MIREG6] = { "mireg6", csrind_any, NULL, NULL, rmw_xiregi,
5909 .min_priv_ver = PRIV_VERSION_1_12_0 },
5910
5911 /* Machine-Level Interrupts (AIA) */
5912 [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
5913 [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },
5914
5915 /* Virtual Interrupts for Supervisor Level (AIA) */
5916 [CSR_MVIEN] = { "mvien", aia_any, NULL, NULL, rmw_mvien },
5917 [CSR_MVIP] = { "mvip", aia_any, NULL, NULL, rmw_mvip },
5918
5919 /* Machine-Level High-Half CSRs (AIA) */
5920 [CSR_MIDELEGH] = { "midelegh", aia_smode32, NULL, NULL, rmw_midelegh },
5921 [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh },
5922 [CSR_MVIENH] = { "mvienh", aia_any32, NULL, NULL, rmw_mvienh },
5923 [CSR_MVIPH] = { "mviph", aia_any32, NULL, NULL, rmw_mviph },
5924 [CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph },
5925
5926 /* Execution environment configuration */
5927 [CSR_MENVCFG] = { "menvcfg", umode, read_menvcfg, write_menvcfg,
5928 .min_priv_ver = PRIV_VERSION_1_12_0 },
5929 [CSR_MENVCFGH] = { "menvcfgh", umode32, read_menvcfgh, write_menvcfgh,
5930 .min_priv_ver = PRIV_VERSION_1_12_0 },
5931 [CSR_SENVCFG] = { "senvcfg", smode, read_senvcfg, write_senvcfg,
5932 .min_priv_ver = PRIV_VERSION_1_12_0 },
5933 [CSR_HENVCFG] = { "henvcfg", hmode, read_henvcfg, write_henvcfg,
5934 .min_priv_ver = PRIV_VERSION_1_12_0 },
5935 [CSR_HENVCFGH] = { "henvcfgh", hmode32, read_henvcfgh, write_henvcfgh,
5936 .min_priv_ver = PRIV_VERSION_1_12_0 },
5937
5938 /* Smstateen extension CSRs */
5939 [CSR_MSTATEEN0] = { "mstateen0", mstateen, read_mstateen, write_mstateen0,
5940 .min_priv_ver = PRIV_VERSION_1_12_0 },
5941 [CSR_MSTATEEN0H] = { "mstateen0h", mstateen, read_mstateenh,
5942 write_mstateen0h,
5943 .min_priv_ver = PRIV_VERSION_1_12_0 },
5944 [CSR_MSTATEEN1] = { "mstateen1", mstateen, read_mstateen,
5945 write_mstateen_1_3,
5946 .min_priv_ver = PRIV_VERSION_1_12_0 },
5947 [CSR_MSTATEEN1H] = { "mstateen1h", mstateen, read_mstateenh,
5948 write_mstateenh_1_3,
5949 .min_priv_ver = PRIV_VERSION_1_12_0 },
5950 [CSR_MSTATEEN2] = { "mstateen2", mstateen, read_mstateen,
5951 write_mstateen_1_3,
5952 .min_priv_ver = PRIV_VERSION_1_12_0 },
5953 [CSR_MSTATEEN2H] = { "mstateen2h", mstateen, read_mstateenh,
5954 write_mstateenh_1_3,
5955 .min_priv_ver = PRIV_VERSION_1_12_0 },
5956 [CSR_MSTATEEN3] = { "mstateen3", mstateen, read_mstateen,
5957 write_mstateen_1_3,
5958 .min_priv_ver = PRIV_VERSION_1_12_0 },
5959 [CSR_MSTATEEN3H] = { "mstateen3h", mstateen, read_mstateenh,
5960 write_mstateenh_1_3,
5961 .min_priv_ver = PRIV_VERSION_1_12_0 },
5962 [CSR_HSTATEEN0] = { "hstateen0", hstateen, read_hstateen, write_hstateen0,
5963 .min_priv_ver = PRIV_VERSION_1_12_0 },
5964 [CSR_HSTATEEN0H] = { "hstateen0h", hstateenh, read_hstateenh,
5965 write_hstateen0h,
5966 .min_priv_ver = PRIV_VERSION_1_12_0 },
5967 [CSR_HSTATEEN1] = { "hstateen1", hstateen, read_hstateen,
5968 write_hstateen_1_3,
5969 .min_priv_ver = PRIV_VERSION_1_12_0 },
5970 [CSR_HSTATEEN1H] = { "hstateen1h", hstateenh, read_hstateenh,
5971 write_hstateenh_1_3,
5972 .min_priv_ver = PRIV_VERSION_1_12_0 },
5973 [CSR_HSTATEEN2] = { "hstateen2", hstateen, read_hstateen,
5974 write_hstateen_1_3,
5975 .min_priv_ver = PRIV_VERSION_1_12_0 },
5976 [CSR_HSTATEEN2H] = { "hstateen2h", hstateenh, read_hstateenh,
5977 write_hstateenh_1_3,
5978 .min_priv_ver = PRIV_VERSION_1_12_0 },
5979 [CSR_HSTATEEN3] = { "hstateen3", hstateen, read_hstateen,
5980 write_hstateen_1_3,
5981 .min_priv_ver = PRIV_VERSION_1_12_0 },
5982 [CSR_HSTATEEN3H] = { "hstateen3h", hstateenh, read_hstateenh,
5983 write_hstateenh_1_3,
5984 .min_priv_ver = PRIV_VERSION_1_12_0 },
5985 [CSR_SSTATEEN0] = { "sstateen0", sstateen, read_sstateen, write_sstateen0,
5986 .min_priv_ver = PRIV_VERSION_1_12_0 },
5987 [CSR_SSTATEEN1] = { "sstateen1", sstateen, read_sstateen,
5988 write_sstateen_1_3,
5989 .min_priv_ver = PRIV_VERSION_1_12_0 },
5990 [CSR_SSTATEEN2] = { "sstateen2", sstateen, read_sstateen,
5991 write_sstateen_1_3,
5992 .min_priv_ver = PRIV_VERSION_1_12_0 },
5993 [CSR_SSTATEEN3] = { "sstateen3", sstateen, read_sstateen,
5994 write_sstateen_1_3,
5995 .min_priv_ver = PRIV_VERSION_1_12_0 },
5996
5997 /* RNMI */
5998 [CSR_MNSCRATCH] = { "mnscratch", rnmi, read_mnscratch, write_mnscratch,
5999 .min_priv_ver = PRIV_VERSION_1_12_0 },
6000 [CSR_MNEPC] = { "mnepc", rnmi, read_mnepc, write_mnepc,
6001 .min_priv_ver = PRIV_VERSION_1_12_0 },
6002 [CSR_MNCAUSE] = { "mncause", rnmi, read_mncause, write_mncause,
6003 .min_priv_ver = PRIV_VERSION_1_12_0 },
6004 [CSR_MNSTATUS] = { "mnstatus", rnmi, read_mnstatus, write_mnstatus,
6005 .min_priv_ver = PRIV_VERSION_1_12_0 },
6006
6007 /* Supervisor Counter Delegation */
6008 [CSR_SCOUNTINHIBIT] = {"scountinhibit", scountinhibit_pred,
6009 read_scountinhibit, write_scountinhibit,
6010 .min_priv_ver = PRIV_VERSION_1_12_0 },
6011
6012 /* Supervisor Trap Setup */
6013 [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus,
6014 NULL, read_sstatus_i128 },
6015 [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie },
6016 [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec },
6017 [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren,
6018 write_scounteren },
6019
6020 /* Supervisor Trap Handling */
6021 [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch,
6022 NULL, read_sscratch_i128, write_sscratch_i128 },
6023 [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc },
6024 [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause },
6025 [CSR_STVAL] = { "stval", smode, read_stval, write_stval },
6026 [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip },
6027 [CSR_STIMECMP] = { "stimecmp", sstc, read_stimecmp, write_stimecmp,
6028 .min_priv_ver = PRIV_VERSION_1_12_0 },
6029 [CSR_STIMECMPH] = { "stimecmph", sstc_32, read_stimecmph, write_stimecmph,
6030 .min_priv_ver = PRIV_VERSION_1_12_0 },
6031 [CSR_VSTIMECMP] = { "vstimecmp", sstc, read_vstimecmp,
6032 write_vstimecmp,
6033 .min_priv_ver = PRIV_VERSION_1_12_0 },
6034 [CSR_VSTIMECMPH] = { "vstimecmph", sstc_32, read_vstimecmph,
6035 write_vstimecmph,
6036 .min_priv_ver = PRIV_VERSION_1_12_0 },
6037
6038 /* Supervisor Protection and Translation */
6039 [CSR_SATP] = { "satp", satp, read_satp, write_satp },
6040
6041 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
6042 [CSR_SISELECT] = { "siselect", csrind_or_aia_smode, NULL, NULL,
6043 rmw_xiselect },
6044 [CSR_SIREG] = { "sireg", csrind_or_aia_smode, NULL, NULL,
6045 rmw_xireg },
6046
6047 /* Supervisor Indirect Register Alias */
6048 [CSR_SIREG2] = { "sireg2", csrind_smode, NULL, NULL, rmw_xiregi,
6049 .min_priv_ver = PRIV_VERSION_1_12_0 },
6050 [CSR_SIREG3] = { "sireg3", csrind_smode, NULL, NULL, rmw_xiregi,
6051 .min_priv_ver = PRIV_VERSION_1_12_0 },
6052 [CSR_SIREG4] = { "sireg4", csrind_smode, NULL, NULL, rmw_xiregi,
6053 .min_priv_ver = PRIV_VERSION_1_12_0 },
6054 [CSR_SIREG5] = { "sireg5", csrind_smode, NULL, NULL, rmw_xiregi,
6055 .min_priv_ver = PRIV_VERSION_1_12_0 },
6056 [CSR_SIREG6] = { "sireg6", csrind_smode, NULL, NULL, rmw_xiregi,
6057 .min_priv_ver = PRIV_VERSION_1_12_0 },
6058
6059 /* Supervisor-Level Interrupts (AIA) */
6060 [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
6061 [CSR_STOPI] = { "stopi", aia_smode, read_stopi },
6062
6063 /* Supervisor-Level High-Half CSRs (AIA) */
6064 [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh },
6065 [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph },
6066
6067 [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus,
6068 .min_priv_ver = PRIV_VERSION_1_12_0 },
6069 [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg,
6070 .min_priv_ver = PRIV_VERSION_1_12_0 },
6071 [CSR_HIDELEG] = { "hideleg", hmode, NULL, NULL, rmw_hideleg,
6072 .min_priv_ver = PRIV_VERSION_1_12_0 },
6073 [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip,
6074 .min_priv_ver = PRIV_VERSION_1_12_0 },
6075 [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip,
6076 .min_priv_ver = PRIV_VERSION_1_12_0 },
6077 [CSR_HIE] = { "hie", hmode, NULL, NULL, rmw_hie,
6078 .min_priv_ver = PRIV_VERSION_1_12_0 },
6079 [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren,
6080 write_hcounteren,
6081 .min_priv_ver = PRIV_VERSION_1_12_0 },
6082 [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie,
6083 .min_priv_ver = PRIV_VERSION_1_12_0 },
6084 [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval,
6085 .min_priv_ver = PRIV_VERSION_1_12_0 },
6086 [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst,
6087 .min_priv_ver = PRIV_VERSION_1_12_0 },
6088 [CSR_HGEIP] = { "hgeip", hmode, read_hgeip,
6089 .min_priv_ver = PRIV_VERSION_1_12_0 },
6090 [CSR_HGATP] = { "hgatp", hgatp, read_hgatp, write_hgatp,
6091 .min_priv_ver = PRIV_VERSION_1_12_0 },
6092 [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta,
6093 write_htimedelta,
6094 .min_priv_ver = PRIV_VERSION_1_12_0 },
6095 [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah,
6096 write_htimedeltah,
6097 .min_priv_ver = PRIV_VERSION_1_12_0 },
6098
6099 [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus,
6100 write_vsstatus,
6101 .min_priv_ver = PRIV_VERSION_1_12_0 },
6102 [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip,
6103 .min_priv_ver = PRIV_VERSION_1_12_0 },
6104 [CSR_VSIE] = { "vsie", hmode, NULL, NULL, rmw_vsie ,
6105 .min_priv_ver = PRIV_VERSION_1_12_0 },
6106 [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec,
6107 .min_priv_ver = PRIV_VERSION_1_12_0 },
6108 [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch,
6109 write_vsscratch,
6110 .min_priv_ver = PRIV_VERSION_1_12_0 },
6111 [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc,
6112 .min_priv_ver = PRIV_VERSION_1_12_0 },
6113 [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause,
6114 .min_priv_ver = PRIV_VERSION_1_12_0 },
6115 [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval,
6116 .min_priv_ver = PRIV_VERSION_1_12_0 },
6117 [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp,
6118 .min_priv_ver = PRIV_VERSION_1_12_0 },
6119
6120 [CSR_MTVAL2] = { "mtval2", dbltrp_hmode, read_mtval2, write_mtval2,
6121 .min_priv_ver = PRIV_VERSION_1_12_0 },
6122 [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst,
6123 .min_priv_ver = PRIV_VERSION_1_12_0 },
6124
6125 /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
6126 [CSR_HVIEN] = { "hvien", aia_hmode, NULL, NULL, rmw_hvien },
6127 [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl,
6128 write_hvictl },
6129 [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1,
6130 write_hviprio1 },
6131 [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2,
6132 write_hviprio2 },
6133 /*
6134 * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
6135 */
6136 [CSR_VSISELECT] = { "vsiselect", csrind_or_aia_hmode, NULL, NULL,
6137 rmw_xiselect },
6138 [CSR_VSIREG] = { "vsireg", csrind_or_aia_hmode, NULL, NULL,
6139 rmw_xireg },
6140
    /* Virtual Supervisor Indirect Register Alias */
6142 [CSR_VSIREG2] = { "vsireg2", csrind_hmode, NULL, NULL, rmw_xiregi,
6143 .min_priv_ver = PRIV_VERSION_1_12_0 },
6144 [CSR_VSIREG3] = { "vsireg3", csrind_hmode, NULL, NULL, rmw_xiregi,
6145 .min_priv_ver = PRIV_VERSION_1_12_0 },
6146 [CSR_VSIREG4] = { "vsireg4", csrind_hmode, NULL, NULL, rmw_xiregi,
6147 .min_priv_ver = PRIV_VERSION_1_12_0 },
6148 [CSR_VSIREG5] = { "vsireg5", csrind_hmode, NULL, NULL, rmw_xiregi,
6149 .min_priv_ver = PRIV_VERSION_1_12_0 },
6150 [CSR_VSIREG6] = { "vsireg6", csrind_hmode, NULL, NULL, rmw_xiregi,
6151 .min_priv_ver = PRIV_VERSION_1_12_0 },
6152
6153 /* VS-Level Interrupts (H-extension with AIA) */
6154 [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
6155 [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi },
6156
6157 /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
6158 [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL,
6159 rmw_hidelegh },
6160 [CSR_HVIENH] = { "hvienh", aia_hmode32, NULL, NULL, rmw_hvienh },
6161 [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph },
6162 [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h,
6163 write_hviprio1h },
6164 [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h,
6165 write_hviprio2h },
6166 [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh },
6167 [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph },
6168
6169 /* Physical Memory Protection */
6170 [CSR_MSECCFG] = { "mseccfg", have_mseccfg, read_mseccfg, write_mseccfg,
6171 .min_priv_ver = PRIV_VERSION_1_11_0 },
6172 [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg },
6173 [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg },
6174 [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg },
6175 [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg },
6176 [CSR_PMPCFG4] = { "pmpcfg4", pmp, read_pmpcfg, write_pmpcfg,
6177 .min_priv_ver = PRIV_VERSION_1_12_0 },
6178 [CSR_PMPCFG5] = { "pmpcfg5", pmp, read_pmpcfg, write_pmpcfg,
6179 .min_priv_ver = PRIV_VERSION_1_12_0 },
6180 [CSR_PMPCFG6] = { "pmpcfg6", pmp, read_pmpcfg, write_pmpcfg,
6181 .min_priv_ver = PRIV_VERSION_1_12_0 },
6182 [CSR_PMPCFG7] = { "pmpcfg7", pmp, read_pmpcfg, write_pmpcfg,
6183 .min_priv_ver = PRIV_VERSION_1_12_0 },
6184 [CSR_PMPCFG8] = { "pmpcfg8", pmp, read_pmpcfg, write_pmpcfg,
6185 .min_priv_ver = PRIV_VERSION_1_12_0 },
6186 [CSR_PMPCFG9] = { "pmpcfg9", pmp, read_pmpcfg, write_pmpcfg,
6187 .min_priv_ver = PRIV_VERSION_1_12_0 },
6188 [CSR_PMPCFG10] = { "pmpcfg10", pmp, read_pmpcfg, write_pmpcfg,
6189 .min_priv_ver = PRIV_VERSION_1_12_0 },
6190 [CSR_PMPCFG11] = { "pmpcfg11", pmp, read_pmpcfg, write_pmpcfg,
6191 .min_priv_ver = PRIV_VERSION_1_12_0 },
6192 [CSR_PMPCFG12] = { "pmpcfg12", pmp, read_pmpcfg, write_pmpcfg,
6193 .min_priv_ver = PRIV_VERSION_1_12_0 },
6194 [CSR_PMPCFG13] = { "pmpcfg13", pmp, read_pmpcfg, write_pmpcfg,
6195 .min_priv_ver = PRIV_VERSION_1_12_0 },
6196 [CSR_PMPCFG14] = { "pmpcfg14", pmp, read_pmpcfg, write_pmpcfg,
6197 .min_priv_ver = PRIV_VERSION_1_12_0 },
6198 [CSR_PMPCFG15] = { "pmpcfg15", pmp, read_pmpcfg, write_pmpcfg,
6199 .min_priv_ver = PRIV_VERSION_1_12_0 },
6200 [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr },
6201 [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr },
6202 [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr },
6203 [CSR_PMPADDR3] = { "pmpaddr3", pmp, read_pmpaddr, write_pmpaddr },
6204 [CSR_PMPADDR4] = { "pmpaddr4", pmp, read_pmpaddr, write_pmpaddr },
6205 [CSR_PMPADDR5] = { "pmpaddr5", pmp, read_pmpaddr, write_pmpaddr },
6206 [CSR_PMPADDR6] = { "pmpaddr6", pmp, read_pmpaddr, write_pmpaddr },
6207 [CSR_PMPADDR7] = { "pmpaddr7", pmp, read_pmpaddr, write_pmpaddr },
6208 [CSR_PMPADDR8] = { "pmpaddr8", pmp, read_pmpaddr, write_pmpaddr },
6209 [CSR_PMPADDR9] = { "pmpaddr9", pmp, read_pmpaddr, write_pmpaddr },
6210 [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr },
6211 [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
6212 [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
6213 [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
6214 [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
6215 [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
6216 [CSR_PMPADDR16] = { "pmpaddr16", pmp, read_pmpaddr, write_pmpaddr,
6217 .min_priv_ver = PRIV_VERSION_1_12_0 },
6218 [CSR_PMPADDR17] = { "pmpaddr17", pmp, read_pmpaddr, write_pmpaddr,
6219 .min_priv_ver = PRIV_VERSION_1_12_0 },
6220 [CSR_PMPADDR18] = { "pmpaddr18", pmp, read_pmpaddr, write_pmpaddr,
6221 .min_priv_ver = PRIV_VERSION_1_12_0 },
6222 [CSR_PMPADDR19] = { "pmpaddr19", pmp, read_pmpaddr, write_pmpaddr,
6223 .min_priv_ver = PRIV_VERSION_1_12_0 },
6224 [CSR_PMPADDR20] = { "pmpaddr20", pmp, read_pmpaddr, write_pmpaddr,
6225 .min_priv_ver = PRIV_VERSION_1_12_0 },
6226 [CSR_PMPADDR21] = { "pmpaddr21", pmp, read_pmpaddr, write_pmpaddr,
6227 .min_priv_ver = PRIV_VERSION_1_12_0 },
6228 [CSR_PMPADDR22] = { "pmpaddr22", pmp, read_pmpaddr, write_pmpaddr,
6229 .min_priv_ver = PRIV_VERSION_1_12_0 },
6230 [CSR_PMPADDR23] = { "pmpaddr23", pmp, read_pmpaddr, write_pmpaddr,
6231 .min_priv_ver = PRIV_VERSION_1_12_0 },
6232 [CSR_PMPADDR24] = { "pmpaddr24", pmp, read_pmpaddr, write_pmpaddr,
6233 .min_priv_ver = PRIV_VERSION_1_12_0 },
6234 [CSR_PMPADDR25] = { "pmpaddr25", pmp, read_pmpaddr, write_pmpaddr,
6235 .min_priv_ver = PRIV_VERSION_1_12_0 },
6236 [CSR_PMPADDR26] = { "pmpaddr26", pmp, read_pmpaddr, write_pmpaddr,
6237 .min_priv_ver = PRIV_VERSION_1_12_0 },
6238 [CSR_PMPADDR27] = { "pmpaddr27", pmp, read_pmpaddr, write_pmpaddr,
6239 .min_priv_ver = PRIV_VERSION_1_12_0 },
6240 [CSR_PMPADDR28] = { "pmpaddr28", pmp, read_pmpaddr, write_pmpaddr,
6241 .min_priv_ver = PRIV_VERSION_1_12_0 },
6242 [CSR_PMPADDR29] = { "pmpaddr29", pmp, read_pmpaddr, write_pmpaddr,
6243 .min_priv_ver = PRIV_VERSION_1_12_0 },
6244 [CSR_PMPADDR30] = { "pmpaddr30", pmp, read_pmpaddr, write_pmpaddr,
6245 .min_priv_ver = PRIV_VERSION_1_12_0 },
6246 [CSR_PMPADDR31] = { "pmpaddr31", pmp, read_pmpaddr, write_pmpaddr,
6247 .min_priv_ver = PRIV_VERSION_1_12_0 },
6248 [CSR_PMPADDR32] = { "pmpaddr32", pmp, read_pmpaddr, write_pmpaddr,
6249 .min_priv_ver = PRIV_VERSION_1_12_0 },
6250 [CSR_PMPADDR33] = { "pmpaddr33", pmp, read_pmpaddr, write_pmpaddr,
6251 .min_priv_ver = PRIV_VERSION_1_12_0 },
6252 [CSR_PMPADDR34] = { "pmpaddr34", pmp, read_pmpaddr, write_pmpaddr,
6253 .min_priv_ver = PRIV_VERSION_1_12_0 },
6254 [CSR_PMPADDR35] = { "pmpaddr35", pmp, read_pmpaddr, write_pmpaddr,
6255 .min_priv_ver = PRIV_VERSION_1_12_0 },
6256 [CSR_PMPADDR36] = { "pmpaddr36", pmp, read_pmpaddr, write_pmpaddr,
6257 .min_priv_ver = PRIV_VERSION_1_12_0 },
6258 [CSR_PMPADDR37] = { "pmpaddr37", pmp, read_pmpaddr, write_pmpaddr,
6259 .min_priv_ver = PRIV_VERSION_1_12_0 },
6260 [CSR_PMPADDR38] = { "pmpaddr38", pmp, read_pmpaddr, write_pmpaddr,
6261 .min_priv_ver = PRIV_VERSION_1_12_0 },
6262 [CSR_PMPADDR39] = { "pmpaddr39", pmp, read_pmpaddr, write_pmpaddr,
6263 .min_priv_ver = PRIV_VERSION_1_12_0 },
6264 [CSR_PMPADDR40] = { "pmpaddr40", pmp, read_pmpaddr, write_pmpaddr,
6265 .min_priv_ver = PRIV_VERSION_1_12_0 },
6266 [CSR_PMPADDR41] = { "pmpaddr41", pmp, read_pmpaddr, write_pmpaddr,
6267 .min_priv_ver = PRIV_VERSION_1_12_0 },
6268 [CSR_PMPADDR42] = { "pmpaddr42", pmp, read_pmpaddr, write_pmpaddr,
6269 .min_priv_ver = PRIV_VERSION_1_12_0 },
6270 [CSR_PMPADDR43] = { "pmpaddr43", pmp, read_pmpaddr, write_pmpaddr,
6271 .min_priv_ver = PRIV_VERSION_1_12_0 },
6272 [CSR_PMPADDR44] = { "pmpaddr44", pmp, read_pmpaddr, write_pmpaddr,
6273 .min_priv_ver = PRIV_VERSION_1_12_0 },
6274 [CSR_PMPADDR45] = { "pmpaddr45", pmp, read_pmpaddr, write_pmpaddr,
6275 .min_priv_ver = PRIV_VERSION_1_12_0 },
6276 [CSR_PMPADDR46] = { "pmpaddr46", pmp, read_pmpaddr, write_pmpaddr,
6277 .min_priv_ver = PRIV_VERSION_1_12_0 },
6278 [CSR_PMPADDR47] = { "pmpaddr47", pmp, read_pmpaddr, write_pmpaddr,
6279 .min_priv_ver = PRIV_VERSION_1_12_0 },
6280 [CSR_PMPADDR48] = { "pmpaddr48", pmp, read_pmpaddr, write_pmpaddr,
6281 .min_priv_ver = PRIV_VERSION_1_12_0 },
6282 [CSR_PMPADDR49] = { "pmpaddr49", pmp, read_pmpaddr, write_pmpaddr,
6283 .min_priv_ver = PRIV_VERSION_1_12_0 },
6284 [CSR_PMPADDR50] = { "pmpaddr50", pmp, read_pmpaddr, write_pmpaddr,
6285 .min_priv_ver = PRIV_VERSION_1_12_0 },
6286 [CSR_PMPADDR51] = { "pmpaddr51", pmp, read_pmpaddr, write_pmpaddr,
6287 .min_priv_ver = PRIV_VERSION_1_12_0 },
6288 [CSR_PMPADDR52] = { "pmpaddr52", pmp, read_pmpaddr, write_pmpaddr,
6289 .min_priv_ver = PRIV_VERSION_1_12_0 },
6290 [CSR_PMPADDR53] = { "pmpaddr53", pmp, read_pmpaddr, write_pmpaddr,
6291 .min_priv_ver = PRIV_VERSION_1_12_0 },
6292 [CSR_PMPADDR54] = { "pmpaddr54", pmp, read_pmpaddr, write_pmpaddr,
6293 .min_priv_ver = PRIV_VERSION_1_12_0 },
6294 [CSR_PMPADDR55] = { "pmpaddr55", pmp, read_pmpaddr, write_pmpaddr,
6295 .min_priv_ver = PRIV_VERSION_1_12_0 },
6296 [CSR_PMPADDR56] = { "pmpaddr56", pmp, read_pmpaddr, write_pmpaddr,
6297 .min_priv_ver = PRIV_VERSION_1_12_0 },
6298 [CSR_PMPADDR57] = { "pmpaddr57", pmp, read_pmpaddr, write_pmpaddr,
6299 .min_priv_ver = PRIV_VERSION_1_12_0 },
6300 [CSR_PMPADDR58] = { "pmpaddr58", pmp, read_pmpaddr, write_pmpaddr,
6301 .min_priv_ver = PRIV_VERSION_1_12_0 },
6302 [CSR_PMPADDR59] = { "pmpaddr59", pmp, read_pmpaddr, write_pmpaddr,
6303 .min_priv_ver = PRIV_VERSION_1_12_0 },
6304 [CSR_PMPADDR60] = { "pmpaddr60", pmp, read_pmpaddr, write_pmpaddr,
6305 .min_priv_ver = PRIV_VERSION_1_12_0 },
6306 [CSR_PMPADDR61] = { "pmpaddr61", pmp, read_pmpaddr, write_pmpaddr,
6307 .min_priv_ver = PRIV_VERSION_1_12_0 },
6308 [CSR_PMPADDR62] = { "pmpaddr62", pmp, read_pmpaddr, write_pmpaddr,
6309 .min_priv_ver = PRIV_VERSION_1_12_0 },
6310 [CSR_PMPADDR63] = { "pmpaddr63", pmp, read_pmpaddr, write_pmpaddr,
6311 .min_priv_ver = PRIV_VERSION_1_12_0 },
6312
6313 /* Debug CSRs */
6314 [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect },
6315 [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata },
6316 [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata },
6317 [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata },
6318 [CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },
6319 [CSR_MCONTEXT] = { "mcontext", debug, read_mcontext, write_mcontext },
6320
6321 [CSR_MCTRCTL] = { "mctrctl", ctr_mmode, NULL, NULL, rmw_xctrctl },
6322 [CSR_SCTRCTL] = { "sctrctl", ctr_smode, NULL, NULL, rmw_xctrctl },
6323 [CSR_VSCTRCTL] = { "vsctrctl", ctr_smode, NULL, NULL, rmw_xctrctl },
6324 [CSR_SCTRDEPTH] = { "sctrdepth", ctr_smode, NULL, NULL, rmw_sctrdepth },
6325 [CSR_SCTRSTATUS] = { "sctrstatus", ctr_smode, NULL, NULL, rmw_sctrstatus },
6326
6327 /* Performance Counters */
6328 [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
6329 [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter },
6330 [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_hpmcounter },
6331 [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_hpmcounter },
6332 [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_hpmcounter },
6333 [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_hpmcounter },
6334 [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_hpmcounter },
6335 [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_hpmcounter },
6336 [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_hpmcounter },
6337 [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_hpmcounter },
6338 [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_hpmcounter },
6339 [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_hpmcounter },
6340 [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_hpmcounter },
6341 [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_hpmcounter },
6342 [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_hpmcounter },
6343 [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_hpmcounter },
6344 [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_hpmcounter },
6345 [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_hpmcounter },
6346 [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_hpmcounter },
6347 [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_hpmcounter },
6348 [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_hpmcounter },
6349 [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_hpmcounter },
6350 [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_hpmcounter },
6351 [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_hpmcounter },
6352 [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_hpmcounter },
6353 [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_hpmcounter },
6354 [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_hpmcounter },
6355 [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_hpmcounter },
6356 [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_hpmcounter },
6357
6358 [CSR_MHPMCOUNTER3] = { "mhpmcounter3", mctr, read_hpmcounter,
6359 write_mhpmcounter },
6360 [CSR_MHPMCOUNTER4] = { "mhpmcounter4", mctr, read_hpmcounter,
6361 write_mhpmcounter },
6362 [CSR_MHPMCOUNTER5] = { "mhpmcounter5", mctr, read_hpmcounter,
6363 write_mhpmcounter },
6364 [CSR_MHPMCOUNTER6] = { "mhpmcounter6", mctr, read_hpmcounter,
6365 write_mhpmcounter },
6366 [CSR_MHPMCOUNTER7] = { "mhpmcounter7", mctr, read_hpmcounter,
6367 write_mhpmcounter },
6368 [CSR_MHPMCOUNTER8] = { "mhpmcounter8", mctr, read_hpmcounter,
6369 write_mhpmcounter },
6370 [CSR_MHPMCOUNTER9] = { "mhpmcounter9", mctr, read_hpmcounter,
6371 write_mhpmcounter },
6372 [CSR_MHPMCOUNTER10] = { "mhpmcounter10", mctr, read_hpmcounter,
6373 write_mhpmcounter },
6374 [CSR_MHPMCOUNTER11] = { "mhpmcounter11", mctr, read_hpmcounter,
6375 write_mhpmcounter },
6376 [CSR_MHPMCOUNTER12] = { "mhpmcounter12", mctr, read_hpmcounter,
6377 write_mhpmcounter },
6378 [CSR_MHPMCOUNTER13] = { "mhpmcounter13", mctr, read_hpmcounter,
6379 write_mhpmcounter },
6380 [CSR_MHPMCOUNTER14] = { "mhpmcounter14", mctr, read_hpmcounter,
6381 write_mhpmcounter },
6382 [CSR_MHPMCOUNTER15] = { "mhpmcounter15", mctr, read_hpmcounter,
6383 write_mhpmcounter },
6384 [CSR_MHPMCOUNTER16] = { "mhpmcounter16", mctr, read_hpmcounter,
6385 write_mhpmcounter },
6386 [CSR_MHPMCOUNTER17] = { "mhpmcounter17", mctr, read_hpmcounter,
6387 write_mhpmcounter },
6388 [CSR_MHPMCOUNTER18] = { "mhpmcounter18", mctr, read_hpmcounter,
6389 write_mhpmcounter },
6390 [CSR_MHPMCOUNTER19] = { "mhpmcounter19", mctr, read_hpmcounter,
6391 write_mhpmcounter },
6392 [CSR_MHPMCOUNTER20] = { "mhpmcounter20", mctr, read_hpmcounter,
6393 write_mhpmcounter },
6394 [CSR_MHPMCOUNTER21] = { "mhpmcounter21", mctr, read_hpmcounter,
6395 write_mhpmcounter },
6396 [CSR_MHPMCOUNTER22] = { "mhpmcounter22", mctr, read_hpmcounter,
6397 write_mhpmcounter },
6398 [CSR_MHPMCOUNTER23] = { "mhpmcounter23", mctr, read_hpmcounter,
6399 write_mhpmcounter },
6400 [CSR_MHPMCOUNTER24] = { "mhpmcounter24", mctr, read_hpmcounter,
6401 write_mhpmcounter },
6402 [CSR_MHPMCOUNTER25] = { "mhpmcounter25", mctr, read_hpmcounter,
6403 write_mhpmcounter },
6404 [CSR_MHPMCOUNTER26] = { "mhpmcounter26", mctr, read_hpmcounter,
6405 write_mhpmcounter },
6406 [CSR_MHPMCOUNTER27] = { "mhpmcounter27", mctr, read_hpmcounter,
6407 write_mhpmcounter },
6408 [CSR_MHPMCOUNTER28] = { "mhpmcounter28", mctr, read_hpmcounter,
6409 write_mhpmcounter },
6410 [CSR_MHPMCOUNTER29] = { "mhpmcounter29", mctr, read_hpmcounter,
6411 write_mhpmcounter },
6412 [CSR_MHPMCOUNTER30] = { "mhpmcounter30", mctr, read_hpmcounter,
6413 write_mhpmcounter },
6414 [CSR_MHPMCOUNTER31] = { "mhpmcounter31", mctr, read_hpmcounter,
6415 write_mhpmcounter },
6416
6417 [CSR_MCOUNTINHIBIT] = { "mcountinhibit", any, read_mcountinhibit,
6418 write_mcountinhibit,
6419 .min_priv_ver = PRIV_VERSION_1_11_0 },
6420
6421 [CSR_MCYCLECFG] = { "mcyclecfg", smcntrpmf, read_mcyclecfg,
6422 write_mcyclecfg,
6423 .min_priv_ver = PRIV_VERSION_1_12_0 },
6424 [CSR_MINSTRETCFG] = { "minstretcfg", smcntrpmf, read_minstretcfg,
6425 write_minstretcfg,
6426 .min_priv_ver = PRIV_VERSION_1_12_0 },
6427
6428 [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent,
6429 write_mhpmevent },
6430 [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent,
6431 write_mhpmevent },
6432 [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_mhpmevent,
6433 write_mhpmevent },
6434 [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_mhpmevent,
6435 write_mhpmevent },
6436 [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_mhpmevent,
6437 write_mhpmevent },
6438 [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_mhpmevent,
6439 write_mhpmevent },
6440 [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_mhpmevent,
6441 write_mhpmevent },
6442 [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_mhpmevent,
6443 write_mhpmevent },
6444 [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_mhpmevent,
6445 write_mhpmevent },
6446 [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_mhpmevent,
6447 write_mhpmevent },
6448 [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_mhpmevent,
6449 write_mhpmevent },
6450 [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_mhpmevent,
6451 write_mhpmevent },
6452 [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_mhpmevent,
6453 write_mhpmevent },
6454 [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_mhpmevent,
6455 write_mhpmevent },
6456 [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_mhpmevent,
6457 write_mhpmevent },
6458 [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_mhpmevent,
6459 write_mhpmevent },
6460 [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_mhpmevent,
6461 write_mhpmevent },
6462 [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_mhpmevent,
6463 write_mhpmevent },
6464 [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_mhpmevent,
6465 write_mhpmevent },
6466 [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_mhpmevent,
6467 write_mhpmevent },
6468 [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_mhpmevent,
6469 write_mhpmevent },
6470 [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_mhpmevent,
6471 write_mhpmevent },
6472 [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_mhpmevent,
6473 write_mhpmevent },
6474 [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_mhpmevent,
6475 write_mhpmevent },
6476 [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_mhpmevent,
6477 write_mhpmevent },
6478 [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_mhpmevent,
6479 write_mhpmevent },
6480 [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_mhpmevent,
6481 write_mhpmevent },
6482 [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_mhpmevent,
6483 write_mhpmevent },
6484 [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent,
6485 write_mhpmevent },
6486
6487 [CSR_MCYCLECFGH] = { "mcyclecfgh", smcntrpmf_32, read_mcyclecfgh,
6488 write_mcyclecfgh,
6489 .min_priv_ver = PRIV_VERSION_1_12_0 },
6490 [CSR_MINSTRETCFGH] = { "minstretcfgh", smcntrpmf_32, read_minstretcfgh,
6491 write_minstretcfgh,
6492 .min_priv_ver = PRIV_VERSION_1_12_0 },
6493
6494 [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf_32, read_mhpmeventh,
6495 write_mhpmeventh,
6496 .min_priv_ver = PRIV_VERSION_1_12_0 },
6497 [CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf_32, read_mhpmeventh,
6498 write_mhpmeventh,
6499 .min_priv_ver = PRIV_VERSION_1_12_0 },
6500 [CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf_32, read_mhpmeventh,
6501 write_mhpmeventh,
6502 .min_priv_ver = PRIV_VERSION_1_12_0 },
6503 [CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf_32, read_mhpmeventh,
6504 write_mhpmeventh,
6505 .min_priv_ver = PRIV_VERSION_1_12_0 },
6506 [CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf_32, read_mhpmeventh,
6507 write_mhpmeventh,
6508 .min_priv_ver = PRIV_VERSION_1_12_0 },
6509 [CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf_32, read_mhpmeventh,
6510 write_mhpmeventh,
6511 .min_priv_ver = PRIV_VERSION_1_12_0 },
6512 [CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf_32, read_mhpmeventh,
6513 write_mhpmeventh,
6514 .min_priv_ver = PRIV_VERSION_1_12_0 },
6515 [CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf_32, read_mhpmeventh,
6516 write_mhpmeventh,
6517 .min_priv_ver = PRIV_VERSION_1_12_0 },
6518 [CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf_32, read_mhpmeventh,
6519 write_mhpmeventh,
6520 .min_priv_ver = PRIV_VERSION_1_12_0 },
6521 [CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf_32, read_mhpmeventh,
6522 write_mhpmeventh,
6523 .min_priv_ver = PRIV_VERSION_1_12_0 },
6524 [CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf_32, read_mhpmeventh,
6525 write_mhpmeventh,
6526 .min_priv_ver = PRIV_VERSION_1_12_0 },
6527 [CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf_32, read_mhpmeventh,
6528 write_mhpmeventh,
6529 .min_priv_ver = PRIV_VERSION_1_12_0 },
6530 [CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf_32, read_mhpmeventh,
6531 write_mhpmeventh,
6532 .min_priv_ver = PRIV_VERSION_1_12_0 },
6533 [CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf_32, read_mhpmeventh,
6534 write_mhpmeventh,
6535 .min_priv_ver = PRIV_VERSION_1_12_0 },
6536 [CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf_32, read_mhpmeventh,
6537 write_mhpmeventh,
6538 .min_priv_ver = PRIV_VERSION_1_12_0 },
6539 [CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf_32, read_mhpmeventh,
6540 write_mhpmeventh,
6541 .min_priv_ver = PRIV_VERSION_1_12_0 },
6542 [CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf_32, read_mhpmeventh,
6543 write_mhpmeventh,
6544 .min_priv_ver = PRIV_VERSION_1_12_0 },
6545 [CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf_32, read_mhpmeventh,
6546 write_mhpmeventh,
6547 .min_priv_ver = PRIV_VERSION_1_12_0 },
6548 [CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf_32, read_mhpmeventh,
6549 write_mhpmeventh,
6550 .min_priv_ver = PRIV_VERSION_1_12_0 },
6551 [CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf_32, read_mhpmeventh,
6552 write_mhpmeventh,
6553 .min_priv_ver = PRIV_VERSION_1_12_0 },
6554 [CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf_32, read_mhpmeventh,
6555 write_mhpmeventh,
6556 .min_priv_ver = PRIV_VERSION_1_12_0 },
6557 [CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf_32, read_mhpmeventh,
6558 write_mhpmeventh,
6559 .min_priv_ver = PRIV_VERSION_1_12_0 },
6560 [CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf_32, read_mhpmeventh,
6561 write_mhpmeventh,
6562 .min_priv_ver = PRIV_VERSION_1_12_0 },
6563 [CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf_32, read_mhpmeventh,
6564 write_mhpmeventh,
6565 .min_priv_ver = PRIV_VERSION_1_12_0 },
6566 [CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf_32, read_mhpmeventh,
6567 write_mhpmeventh,
6568 .min_priv_ver = PRIV_VERSION_1_12_0 },
6569 [CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf_32, read_mhpmeventh,
6570 write_mhpmeventh,
6571 .min_priv_ver = PRIV_VERSION_1_12_0 },
6572 [CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf_32, read_mhpmeventh,
6573 write_mhpmeventh,
6574 .min_priv_ver = PRIV_VERSION_1_12_0 },
6575 [CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf_32, read_mhpmeventh,
6576 write_mhpmeventh,
6577 .min_priv_ver = PRIV_VERSION_1_12_0 },
6578 [CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf_32, read_mhpmeventh,
6579 write_mhpmeventh,
6580 .min_priv_ver = PRIV_VERSION_1_12_0 },
6581
6582 [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_hpmcounterh },
6583 [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_hpmcounterh },
6584 [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_hpmcounterh },
6585 [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_hpmcounterh },
6586 [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_hpmcounterh },
6587 [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_hpmcounterh },
6588 [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_hpmcounterh },
6589 [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_hpmcounterh },
6590 [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_hpmcounterh },
6591 [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_hpmcounterh },
6592 [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_hpmcounterh },
6593 [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_hpmcounterh },
6594 [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_hpmcounterh },
6595 [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_hpmcounterh },
6596 [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_hpmcounterh },
6597 [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_hpmcounterh },
6598 [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_hpmcounterh },
6599 [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_hpmcounterh },
6600 [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_hpmcounterh },
6601 [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_hpmcounterh },
6602 [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_hpmcounterh },
6603 [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_hpmcounterh },
6604 [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_hpmcounterh },
6605 [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_hpmcounterh },
6606 [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_hpmcounterh },
6607 [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_hpmcounterh },
6608 [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_hpmcounterh },
6609 [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_hpmcounterh },
6610 [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_hpmcounterh },
6611
6612 [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", mctr32, read_hpmcounterh,
6613 write_mhpmcounterh },
6614 [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", mctr32, read_hpmcounterh,
6615 write_mhpmcounterh },
6616 [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", mctr32, read_hpmcounterh,
6617 write_mhpmcounterh },
6618 [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", mctr32, read_hpmcounterh,
6619 write_mhpmcounterh },
6620 [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", mctr32, read_hpmcounterh,
6621 write_mhpmcounterh },
6622 [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", mctr32, read_hpmcounterh,
6623 write_mhpmcounterh },
6624 [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", mctr32, read_hpmcounterh,
6625 write_mhpmcounterh },
6626 [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", mctr32, read_hpmcounterh,
6627 write_mhpmcounterh },
6628 [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", mctr32, read_hpmcounterh,
6629 write_mhpmcounterh },
6630 [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", mctr32, read_hpmcounterh,
6631 write_mhpmcounterh },
6632 [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", mctr32, read_hpmcounterh,
6633 write_mhpmcounterh },
6634 [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", mctr32, read_hpmcounterh,
6635 write_mhpmcounterh },
6636 [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", mctr32, read_hpmcounterh,
6637 write_mhpmcounterh },
6638 [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", mctr32, read_hpmcounterh,
6639 write_mhpmcounterh },
6640 [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", mctr32, read_hpmcounterh,
6641 write_mhpmcounterh },
6642 [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", mctr32, read_hpmcounterh,
6643 write_mhpmcounterh },
6644 [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", mctr32, read_hpmcounterh,
6645 write_mhpmcounterh },
6646 [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", mctr32, read_hpmcounterh,
6647 write_mhpmcounterh },
6648 [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", mctr32, read_hpmcounterh,
6649 write_mhpmcounterh },
6650 [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", mctr32, read_hpmcounterh,
6651 write_mhpmcounterh },
6652 [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", mctr32, read_hpmcounterh,
6653 write_mhpmcounterh },
6654 [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", mctr32, read_hpmcounterh,
6655 write_mhpmcounterh },
6656 [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", mctr32, read_hpmcounterh,
6657 write_mhpmcounterh },
6658 [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", mctr32, read_hpmcounterh,
6659 write_mhpmcounterh },
6660 [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", mctr32, read_hpmcounterh,
6661 write_mhpmcounterh },
6662 [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", mctr32, read_hpmcounterh,
6663 write_mhpmcounterh },
6664 [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", mctr32, read_hpmcounterh,
6665 write_mhpmcounterh },
6666 [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", mctr32, read_hpmcounterh,
6667 write_mhpmcounterh },
6668 [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", mctr32, read_hpmcounterh,
6669 write_mhpmcounterh },
6670 [CSR_SCOUNTOVF] = { "scountovf", sscofpmf, read_scountovf,
6671 .min_priv_ver = PRIV_VERSION_1_12_0 },
6672
6673 #endif /* !CONFIG_USER_ONLY */
6674 };
6675