1 /*
2 * RISC-V Control and Status Registers.
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/timer.h"
23 #include "cpu.h"
24 #include "tcg/tcg-cpu.h"
25 #include "pmu.h"
26 #include "time_helper.h"
27 #include "exec/exec-all.h"
28 #include "exec/tb-flush.h"
29 #include "sysemu/cpu-timers.h"
30 #include "qemu/guest-random.h"
31 #include "qapi/error.h"
32
33 /* CSR function table public API */
riscv_get_csr_ops(int csrno,riscv_csr_operations * ops)34 void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
35 {
36 *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
37 }
38
riscv_set_csr_ops(int csrno,riscv_csr_operations * ops)39 void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
40 {
41 csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
42 }
43
44 /* Predicates */
45 #if !defined(CONFIG_USER_ONLY)
smstateen_acc_ok(CPURISCVState * env,int index,uint64_t bit)46 RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit)
47 {
48 bool virt = env->virt_enabled;
49
50 if (env->priv == PRV_M || !riscv_cpu_cfg(env)->ext_smstateen) {
51 return RISCV_EXCP_NONE;
52 }
53
54 if (!(env->mstateen[index] & bit)) {
55 return RISCV_EXCP_ILLEGAL_INST;
56 }
57
58 if (virt) {
59 if (!(env->hstateen[index] & bit)) {
60 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
61 }
62
63 if (env->priv == PRV_U && !(env->sstateen[index] & bit)) {
64 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
65 }
66 }
67
68 if (env->priv == PRV_U && riscv_has_ext(env, RVS)) {
69 if (!(env->sstateen[index] & bit)) {
70 return RISCV_EXCP_ILLEGAL_INST;
71 }
72 }
73
74 return RISCV_EXCP_NONE;
75 }
76 #endif
77
fs(CPURISCVState * env,int csrno)78 static RISCVException fs(CPURISCVState *env, int csrno)
79 {
80 #if !defined(CONFIG_USER_ONLY)
81 if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
82 !riscv_cpu_cfg(env)->ext_zfinx) {
83 return RISCV_EXCP_ILLEGAL_INST;
84 }
85
86 if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
87 return smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR);
88 }
89 #endif
90 return RISCV_EXCP_NONE;
91 }
92
vs(CPURISCVState * env,int csrno)93 static RISCVException vs(CPURISCVState *env, int csrno)
94 {
95 if (riscv_cpu_cfg(env)->ext_zve32x) {
96 #if !defined(CONFIG_USER_ONLY)
97 if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
98 return RISCV_EXCP_ILLEGAL_INST;
99 }
100 #endif
101 return RISCV_EXCP_NONE;
102 }
103 return RISCV_EXCP_ILLEGAL_INST;
104 }
105
ctr(CPURISCVState * env,int csrno)106 static RISCVException ctr(CPURISCVState *env, int csrno)
107 {
108 #if !defined(CONFIG_USER_ONLY)
109 RISCVCPU *cpu = env_archcpu(env);
110 int ctr_index;
111 target_ulong ctr_mask;
112 int base_csrno = CSR_CYCLE;
113 bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;
114
115 if (rv32 && csrno >= CSR_CYCLEH) {
116 /* Offset for RV32 hpmcounternh counters */
117 base_csrno += 0x80;
118 }
119 ctr_index = csrno - base_csrno;
120 ctr_mask = BIT(ctr_index);
121
122 if ((csrno >= CSR_CYCLE && csrno <= CSR_INSTRET) ||
123 (csrno >= CSR_CYCLEH && csrno <= CSR_INSTRETH)) {
124 if (!riscv_cpu_cfg(env)->ext_zicntr) {
125 return RISCV_EXCP_ILLEGAL_INST;
126 }
127
128 goto skip_ext_pmu_check;
129 }
130
131 if (!(cpu->pmu_avail_ctrs & ctr_mask)) {
132 /* No counter is enabled in PMU or the counter is out of range */
133 return RISCV_EXCP_ILLEGAL_INST;
134 }
135
136 skip_ext_pmu_check:
137
138 if (env->debugger) {
139 return RISCV_EXCP_NONE;
140 }
141
142 if (env->priv < PRV_M && !get_field(env->mcounteren, ctr_mask)) {
143 return RISCV_EXCP_ILLEGAL_INST;
144 }
145
146 if (env->virt_enabled) {
147 if (!get_field(env->hcounteren, ctr_mask) ||
148 (env->priv == PRV_U && !get_field(env->scounteren, ctr_mask))) {
149 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
150 }
151 }
152
153 if (riscv_has_ext(env, RVS) && env->priv == PRV_U &&
154 !get_field(env->scounteren, ctr_mask)) {
155 return RISCV_EXCP_ILLEGAL_INST;
156 }
157
158 #endif
159 return RISCV_EXCP_NONE;
160 }
161
ctr32(CPURISCVState * env,int csrno)162 static RISCVException ctr32(CPURISCVState *env, int csrno)
163 {
164 if (riscv_cpu_mxl(env) != MXL_RV32) {
165 return RISCV_EXCP_ILLEGAL_INST;
166 }
167
168 return ctr(env, csrno);
169 }
170
zcmt(CPURISCVState * env,int csrno)171 static RISCVException zcmt(CPURISCVState *env, int csrno)
172 {
173 if (!riscv_cpu_cfg(env)->ext_zcmt) {
174 return RISCV_EXCP_ILLEGAL_INST;
175 }
176
177 #if !defined(CONFIG_USER_ONLY)
178 RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_JVT);
179 if (ret != RISCV_EXCP_NONE) {
180 return ret;
181 }
182 #endif
183
184 return RISCV_EXCP_NONE;
185 }
186
cfi_ss(CPURISCVState * env,int csrno)187 static RISCVException cfi_ss(CPURISCVState *env, int csrno)
188 {
189 if (!env_archcpu(env)->cfg.ext_zicfiss) {
190 return RISCV_EXCP_ILLEGAL_INST;
191 }
192
193 /* if bcfi not active for current env, access to csr is illegal */
194 if (!cpu_get_bcfien(env)) {
195 #if !defined(CONFIG_USER_ONLY)
196 if (env->debugger) {
197 return RISCV_EXCP_NONE;
198 }
199 #endif
200 return RISCV_EXCP_ILLEGAL_INST;
201 }
202
203 return RISCV_EXCP_NONE;
204 }
205
206 #if !defined(CONFIG_USER_ONLY)
mctr(CPURISCVState * env,int csrno)207 static RISCVException mctr(CPURISCVState *env, int csrno)
208 {
209 RISCVCPU *cpu = env_archcpu(env);
210 uint32_t pmu_avail_ctrs = cpu->pmu_avail_ctrs;
211 int ctr_index;
212 int base_csrno = CSR_MHPMCOUNTER3;
213
214 if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) {
215 /* Offset for RV32 mhpmcounternh counters */
216 csrno -= 0x80;
217 }
218
219 g_assert(csrno >= CSR_MHPMCOUNTER3 && csrno <= CSR_MHPMCOUNTER31);
220
221 ctr_index = csrno - base_csrno;
222 if ((BIT(ctr_index) & pmu_avail_ctrs >> 3) == 0) {
223 /* The PMU is not enabled or counter is out of range */
224 return RISCV_EXCP_ILLEGAL_INST;
225 }
226
227 return RISCV_EXCP_NONE;
228 }
229
mctr32(CPURISCVState * env,int csrno)230 static RISCVException mctr32(CPURISCVState *env, int csrno)
231 {
232 if (riscv_cpu_mxl(env) != MXL_RV32) {
233 return RISCV_EXCP_ILLEGAL_INST;
234 }
235
236 return mctr(env, csrno);
237 }
238
sscofpmf(CPURISCVState * env,int csrno)239 static RISCVException sscofpmf(CPURISCVState *env, int csrno)
240 {
241 if (!riscv_cpu_cfg(env)->ext_sscofpmf) {
242 return RISCV_EXCP_ILLEGAL_INST;
243 }
244
245 return RISCV_EXCP_NONE;
246 }
247
sscofpmf_32(CPURISCVState * env,int csrno)248 static RISCVException sscofpmf_32(CPURISCVState *env, int csrno)
249 {
250 if (riscv_cpu_mxl(env) != MXL_RV32) {
251 return RISCV_EXCP_ILLEGAL_INST;
252 }
253
254 return sscofpmf(env, csrno);
255 }
256
smcntrpmf(CPURISCVState * env,int csrno)257 static RISCVException smcntrpmf(CPURISCVState *env, int csrno)
258 {
259 if (!riscv_cpu_cfg(env)->ext_smcntrpmf) {
260 return RISCV_EXCP_ILLEGAL_INST;
261 }
262
263 return RISCV_EXCP_NONE;
264 }
265
smcntrpmf_32(CPURISCVState * env,int csrno)266 static RISCVException smcntrpmf_32(CPURISCVState *env, int csrno)
267 {
268 if (riscv_cpu_mxl(env) != MXL_RV32) {
269 return RISCV_EXCP_ILLEGAL_INST;
270 }
271
272 return smcntrpmf(env, csrno);
273 }
274
any(CPURISCVState * env,int csrno)275 static RISCVException any(CPURISCVState *env, int csrno)
276 {
277 return RISCV_EXCP_NONE;
278 }
279
any32(CPURISCVState * env,int csrno)280 static RISCVException any32(CPURISCVState *env, int csrno)
281 {
282 if (riscv_cpu_mxl(env) != MXL_RV32) {
283 return RISCV_EXCP_ILLEGAL_INST;
284 }
285
286 return any(env, csrno);
287
288 }
289
aia_any(CPURISCVState * env,int csrno)290 static RISCVException aia_any(CPURISCVState *env, int csrno)
291 {
292 if (!riscv_cpu_cfg(env)->ext_smaia) {
293 return RISCV_EXCP_ILLEGAL_INST;
294 }
295
296 return any(env, csrno);
297 }
298
aia_any32(CPURISCVState * env,int csrno)299 static RISCVException aia_any32(CPURISCVState *env, int csrno)
300 {
301 if (!riscv_cpu_cfg(env)->ext_smaia) {
302 return RISCV_EXCP_ILLEGAL_INST;
303 }
304
305 return any32(env, csrno);
306 }
307
smode(CPURISCVState * env,int csrno)308 static RISCVException smode(CPURISCVState *env, int csrno)
309 {
310 if (riscv_has_ext(env, RVS)) {
311 return RISCV_EXCP_NONE;
312 }
313
314 return RISCV_EXCP_ILLEGAL_INST;
315 }
316
smode32(CPURISCVState * env,int csrno)317 static RISCVException smode32(CPURISCVState *env, int csrno)
318 {
319 if (riscv_cpu_mxl(env) != MXL_RV32) {
320 return RISCV_EXCP_ILLEGAL_INST;
321 }
322
323 return smode(env, csrno);
324 }
325
aia_smode(CPURISCVState * env,int csrno)326 static RISCVException aia_smode(CPURISCVState *env, int csrno)
327 {
328 if (!riscv_cpu_cfg(env)->ext_ssaia) {
329 return RISCV_EXCP_ILLEGAL_INST;
330 }
331
332 return smode(env, csrno);
333 }
334
aia_smode32(CPURISCVState * env,int csrno)335 static RISCVException aia_smode32(CPURISCVState *env, int csrno)
336 {
337 if (!riscv_cpu_cfg(env)->ext_ssaia) {
338 return RISCV_EXCP_ILLEGAL_INST;
339 }
340
341 return smode32(env, csrno);
342 }
343
hmode(CPURISCVState * env,int csrno)344 static RISCVException hmode(CPURISCVState *env, int csrno)
345 {
346 if (riscv_has_ext(env, RVH)) {
347 return RISCV_EXCP_NONE;
348 }
349
350 return RISCV_EXCP_ILLEGAL_INST;
351 }
352
hmode32(CPURISCVState * env,int csrno)353 static RISCVException hmode32(CPURISCVState *env, int csrno)
354 {
355 if (riscv_cpu_mxl(env) != MXL_RV32) {
356 return RISCV_EXCP_ILLEGAL_INST;
357 }
358
359 return hmode(env, csrno);
360
361 }
362
umode(CPURISCVState * env,int csrno)363 static RISCVException umode(CPURISCVState *env, int csrno)
364 {
365 if (riscv_has_ext(env, RVU)) {
366 return RISCV_EXCP_NONE;
367 }
368
369 return RISCV_EXCP_ILLEGAL_INST;
370 }
371
umode32(CPURISCVState * env,int csrno)372 static RISCVException umode32(CPURISCVState *env, int csrno)
373 {
374 if (riscv_cpu_mxl(env) != MXL_RV32) {
375 return RISCV_EXCP_ILLEGAL_INST;
376 }
377
378 return umode(env, csrno);
379 }
380
mstateen(CPURISCVState * env,int csrno)381 static RISCVException mstateen(CPURISCVState *env, int csrno)
382 {
383 if (!riscv_cpu_cfg(env)->ext_smstateen) {
384 return RISCV_EXCP_ILLEGAL_INST;
385 }
386
387 return any(env, csrno);
388 }
389
hstateen_pred(CPURISCVState * env,int csrno,int base)390 static RISCVException hstateen_pred(CPURISCVState *env, int csrno, int base)
391 {
392 if (!riscv_cpu_cfg(env)->ext_smstateen) {
393 return RISCV_EXCP_ILLEGAL_INST;
394 }
395
396 RISCVException ret = hmode(env, csrno);
397 if (ret != RISCV_EXCP_NONE) {
398 return ret;
399 }
400
401 if (env->debugger) {
402 return RISCV_EXCP_NONE;
403 }
404
405 if (env->priv < PRV_M) {
406 if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEEN)) {
407 return RISCV_EXCP_ILLEGAL_INST;
408 }
409 }
410
411 return RISCV_EXCP_NONE;
412 }
413
hstateen(CPURISCVState * env,int csrno)414 static RISCVException hstateen(CPURISCVState *env, int csrno)
415 {
416 return hstateen_pred(env, csrno, CSR_HSTATEEN0);
417 }
418
hstateenh(CPURISCVState * env,int csrno)419 static RISCVException hstateenh(CPURISCVState *env, int csrno)
420 {
421 return hstateen_pred(env, csrno, CSR_HSTATEEN0H);
422 }
423
sstateen(CPURISCVState * env,int csrno)424 static RISCVException sstateen(CPURISCVState *env, int csrno)
425 {
426 bool virt = env->virt_enabled;
427 int index = csrno - CSR_SSTATEEN0;
428
429 if (!riscv_cpu_cfg(env)->ext_smstateen) {
430 return RISCV_EXCP_ILLEGAL_INST;
431 }
432
433 RISCVException ret = smode(env, csrno);
434 if (ret != RISCV_EXCP_NONE) {
435 return ret;
436 }
437
438 if (env->debugger) {
439 return RISCV_EXCP_NONE;
440 }
441
442 if (env->priv < PRV_M) {
443 if (!(env->mstateen[index] & SMSTATEEN_STATEEN)) {
444 return RISCV_EXCP_ILLEGAL_INST;
445 }
446
447 if (virt) {
448 if (!(env->hstateen[index] & SMSTATEEN_STATEEN)) {
449 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
450 }
451 }
452 }
453
454 return RISCV_EXCP_NONE;
455 }
456
sstc(CPURISCVState * env,int csrno)457 static RISCVException sstc(CPURISCVState *env, int csrno)
458 {
459 bool hmode_check = false;
460
461 if (!riscv_cpu_cfg(env)->ext_sstc || !env->rdtime_fn) {
462 return RISCV_EXCP_ILLEGAL_INST;
463 }
464
465 if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) {
466 hmode_check = true;
467 }
468
469 RISCVException ret = hmode_check ? hmode(env, csrno) : smode(env, csrno);
470 if (ret != RISCV_EXCP_NONE) {
471 return ret;
472 }
473
474 if (env->debugger) {
475 return RISCV_EXCP_NONE;
476 }
477
478 if (env->priv == PRV_M) {
479 return RISCV_EXCP_NONE;
480 }
481
482 /*
483 * No need of separate function for rv32 as menvcfg stores both menvcfg
484 * menvcfgh for RV32.
485 */
486 if (!(get_field(env->mcounteren, COUNTEREN_TM) &&
487 get_field(env->menvcfg, MENVCFG_STCE))) {
488 return RISCV_EXCP_ILLEGAL_INST;
489 }
490
491 if (env->virt_enabled) {
492 if (!(get_field(env->hcounteren, COUNTEREN_TM) &&
493 get_field(env->henvcfg, HENVCFG_STCE))) {
494 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
495 }
496 }
497
498 return RISCV_EXCP_NONE;
499 }
500
sstc_32(CPURISCVState * env,int csrno)501 static RISCVException sstc_32(CPURISCVState *env, int csrno)
502 {
503 if (riscv_cpu_mxl(env) != MXL_RV32) {
504 return RISCV_EXCP_ILLEGAL_INST;
505 }
506
507 return sstc(env, csrno);
508 }
509
satp(CPURISCVState * env,int csrno)510 static RISCVException satp(CPURISCVState *env, int csrno)
511 {
512 if (env->priv == PRV_S && !env->virt_enabled &&
513 get_field(env->mstatus, MSTATUS_TVM)) {
514 return RISCV_EXCP_ILLEGAL_INST;
515 }
516 if (env->priv == PRV_S && env->virt_enabled &&
517 get_field(env->hstatus, HSTATUS_VTVM)) {
518 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
519 }
520
521 return smode(env, csrno);
522 }
523
hgatp(CPURISCVState * env,int csrno)524 static RISCVException hgatp(CPURISCVState *env, int csrno)
525 {
526 if (env->priv == PRV_S && !env->virt_enabled &&
527 get_field(env->mstatus, MSTATUS_TVM)) {
528 return RISCV_EXCP_ILLEGAL_INST;
529 }
530
531 return hmode(env, csrno);
532 }
533
534 /* Checks if PointerMasking registers could be accessed */
pointer_masking(CPURISCVState * env,int csrno)535 static RISCVException pointer_masking(CPURISCVState *env, int csrno)
536 {
537 /* Check if j-ext is present */
538 if (riscv_has_ext(env, RVJ)) {
539 return RISCV_EXCP_NONE;
540 }
541 return RISCV_EXCP_ILLEGAL_INST;
542 }
543
aia_hmode(CPURISCVState * env,int csrno)544 static RISCVException aia_hmode(CPURISCVState *env, int csrno)
545 {
546 if (!riscv_cpu_cfg(env)->ext_ssaia) {
547 return RISCV_EXCP_ILLEGAL_INST;
548 }
549
550 return hmode(env, csrno);
551 }
552
aia_hmode32(CPURISCVState * env,int csrno)553 static RISCVException aia_hmode32(CPURISCVState *env, int csrno)
554 {
555 if (!riscv_cpu_cfg(env)->ext_ssaia) {
556 return RISCV_EXCP_ILLEGAL_INST;
557 }
558
559 return hmode32(env, csrno);
560 }
561
pmp(CPURISCVState * env,int csrno)562 static RISCVException pmp(CPURISCVState *env, int csrno)
563 {
564 if (riscv_cpu_cfg(env)->pmp) {
565 if (csrno <= CSR_PMPCFG3) {
566 uint32_t reg_index = csrno - CSR_PMPCFG0;
567
568 /* TODO: RV128 restriction check */
569 if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
570 return RISCV_EXCP_ILLEGAL_INST;
571 }
572 }
573
574 return RISCV_EXCP_NONE;
575 }
576
577 return RISCV_EXCP_ILLEGAL_INST;
578 }
579
have_mseccfg(CPURISCVState * env,int csrno)580 static RISCVException have_mseccfg(CPURISCVState *env, int csrno)
581 {
582 if (riscv_cpu_cfg(env)->ext_smepmp) {
583 return RISCV_EXCP_NONE;
584 }
585 if (riscv_cpu_cfg(env)->ext_zkr) {
586 return RISCV_EXCP_NONE;
587 }
588
589 return RISCV_EXCP_ILLEGAL_INST;
590 }
591
debug(CPURISCVState * env,int csrno)592 static RISCVException debug(CPURISCVState *env, int csrno)
593 {
594 if (riscv_cpu_cfg(env)->debug) {
595 return RISCV_EXCP_NONE;
596 }
597
598 return RISCV_EXCP_ILLEGAL_INST;
599 }
600 #endif
601
seed(CPURISCVState * env,int csrno)602 static RISCVException seed(CPURISCVState *env, int csrno)
603 {
604 if (!riscv_cpu_cfg(env)->ext_zkr) {
605 return RISCV_EXCP_ILLEGAL_INST;
606 }
607
608 #if !defined(CONFIG_USER_ONLY)
609 if (env->debugger) {
610 return RISCV_EXCP_NONE;
611 }
612
613 /*
614 * With a CSR read-write instruction:
615 * 1) The seed CSR is always available in machine mode as normal.
616 * 2) Attempted access to seed from virtual modes VS and VU always raises
617 * an exception(virtual instruction exception only if mseccfg.sseed=1).
618 * 3) Without the corresponding access control bit set to 1, any attempted
619 * access to seed from U, S or HS modes will raise an illegal instruction
620 * exception.
621 */
622 if (env->priv == PRV_M) {
623 return RISCV_EXCP_NONE;
624 } else if (env->virt_enabled) {
625 if (env->mseccfg & MSECCFG_SSEED) {
626 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
627 } else {
628 return RISCV_EXCP_ILLEGAL_INST;
629 }
630 } else {
631 if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) {
632 return RISCV_EXCP_NONE;
633 } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) {
634 return RISCV_EXCP_NONE;
635 } else {
636 return RISCV_EXCP_ILLEGAL_INST;
637 }
638 }
639 #else
640 return RISCV_EXCP_NONE;
641 #endif
642 }
643
644 /* zicfiss CSR_SSP read and write */
read_ssp(CPURISCVState * env,int csrno,target_ulong * val)645 static int read_ssp(CPURISCVState *env, int csrno, target_ulong *val)
646 {
647 *val = env->ssp;
648 return RISCV_EXCP_NONE;
649 }
650
write_ssp(CPURISCVState * env,int csrno,target_ulong val)651 static int write_ssp(CPURISCVState *env, int csrno, target_ulong val)
652 {
653 env->ssp = val;
654 return RISCV_EXCP_NONE;
655 }
656
657 /* User Floating-Point CSRs */
read_fflags(CPURISCVState * env,int csrno,target_ulong * val)658 static RISCVException read_fflags(CPURISCVState *env, int csrno,
659 target_ulong *val)
660 {
661 *val = riscv_cpu_get_fflags(env);
662 return RISCV_EXCP_NONE;
663 }
664
write_fflags(CPURISCVState * env,int csrno,target_ulong val)665 static RISCVException write_fflags(CPURISCVState *env, int csrno,
666 target_ulong val)
667 {
668 #if !defined(CONFIG_USER_ONLY)
669 if (riscv_has_ext(env, RVF)) {
670 env->mstatus |= MSTATUS_FS;
671 }
672 #endif
673 riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
674 return RISCV_EXCP_NONE;
675 }
676
read_frm(CPURISCVState * env,int csrno,target_ulong * val)677 static RISCVException read_frm(CPURISCVState *env, int csrno,
678 target_ulong *val)
679 {
680 *val = env->frm;
681 return RISCV_EXCP_NONE;
682 }
683
write_frm(CPURISCVState * env,int csrno,target_ulong val)684 static RISCVException write_frm(CPURISCVState *env, int csrno,
685 target_ulong val)
686 {
687 #if !defined(CONFIG_USER_ONLY)
688 if (riscv_has_ext(env, RVF)) {
689 env->mstatus |= MSTATUS_FS;
690 }
691 #endif
692 env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
693 return RISCV_EXCP_NONE;
694 }
695
read_fcsr(CPURISCVState * env,int csrno,target_ulong * val)696 static RISCVException read_fcsr(CPURISCVState *env, int csrno,
697 target_ulong *val)
698 {
699 *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
700 | (env->frm << FSR_RD_SHIFT);
701 return RISCV_EXCP_NONE;
702 }
703
write_fcsr(CPURISCVState * env,int csrno,target_ulong val)704 static RISCVException write_fcsr(CPURISCVState *env, int csrno,
705 target_ulong val)
706 {
707 #if !defined(CONFIG_USER_ONLY)
708 if (riscv_has_ext(env, RVF)) {
709 env->mstatus |= MSTATUS_FS;
710 }
711 #endif
712 env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
713 riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
714 return RISCV_EXCP_NONE;
715 }
716
read_vtype(CPURISCVState * env,int csrno,target_ulong * val)717 static RISCVException read_vtype(CPURISCVState *env, int csrno,
718 target_ulong *val)
719 {
720 uint64_t vill;
721 switch (env->xl) {
722 case MXL_RV32:
723 vill = (uint32_t)env->vill << 31;
724 break;
725 case MXL_RV64:
726 vill = (uint64_t)env->vill << 63;
727 break;
728 default:
729 g_assert_not_reached();
730 }
731 *val = (target_ulong)vill | env->vtype;
732 return RISCV_EXCP_NONE;
733 }
734
read_vl(CPURISCVState * env,int csrno,target_ulong * val)735 static RISCVException read_vl(CPURISCVState *env, int csrno,
736 target_ulong *val)
737 {
738 *val = env->vl;
739 return RISCV_EXCP_NONE;
740 }
741
read_vlenb(CPURISCVState * env,int csrno,target_ulong * val)742 static RISCVException read_vlenb(CPURISCVState *env, int csrno,
743 target_ulong *val)
744 {
745 *val = riscv_cpu_cfg(env)->vlenb;
746 return RISCV_EXCP_NONE;
747 }
748
read_vxrm(CPURISCVState * env,int csrno,target_ulong * val)749 static RISCVException read_vxrm(CPURISCVState *env, int csrno,
750 target_ulong *val)
751 {
752 *val = env->vxrm;
753 return RISCV_EXCP_NONE;
754 }
755
write_vxrm(CPURISCVState * env,int csrno,target_ulong val)756 static RISCVException write_vxrm(CPURISCVState *env, int csrno,
757 target_ulong val)
758 {
759 #if !defined(CONFIG_USER_ONLY)
760 env->mstatus |= MSTATUS_VS;
761 #endif
762 env->vxrm = val;
763 return RISCV_EXCP_NONE;
764 }
765
read_vxsat(CPURISCVState * env,int csrno,target_ulong * val)766 static RISCVException read_vxsat(CPURISCVState *env, int csrno,
767 target_ulong *val)
768 {
769 *val = env->vxsat & BIT(0);
770 return RISCV_EXCP_NONE;
771 }
772
write_vxsat(CPURISCVState * env,int csrno,target_ulong val)773 static RISCVException write_vxsat(CPURISCVState *env, int csrno,
774 target_ulong val)
775 {
776 #if !defined(CONFIG_USER_ONLY)
777 env->mstatus |= MSTATUS_VS;
778 #endif
779 env->vxsat = val & BIT(0);
780 return RISCV_EXCP_NONE;
781 }
782
read_vstart(CPURISCVState * env,int csrno,target_ulong * val)783 static RISCVException read_vstart(CPURISCVState *env, int csrno,
784 target_ulong *val)
785 {
786 *val = env->vstart;
787 return RISCV_EXCP_NONE;
788 }
789
write_vstart(CPURISCVState * env,int csrno,target_ulong val)790 static RISCVException write_vstart(CPURISCVState *env, int csrno,
791 target_ulong val)
792 {
793 #if !defined(CONFIG_USER_ONLY)
794 env->mstatus |= MSTATUS_VS;
795 #endif
796 /*
797 * The vstart CSR is defined to have only enough writable bits
798 * to hold the largest element index, i.e. lg2(VLEN) bits.
799 */
800 env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlenb << 3));
801 return RISCV_EXCP_NONE;
802 }
803
read_vcsr(CPURISCVState * env,int csrno,target_ulong * val)804 static RISCVException read_vcsr(CPURISCVState *env, int csrno,
805 target_ulong *val)
806 {
807 *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
808 return RISCV_EXCP_NONE;
809 }
810
write_vcsr(CPURISCVState * env,int csrno,target_ulong val)811 static RISCVException write_vcsr(CPURISCVState *env, int csrno,
812 target_ulong val)
813 {
814 #if !defined(CONFIG_USER_ONLY)
815 env->mstatus |= MSTATUS_VS;
816 #endif
817 env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
818 env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
819 return RISCV_EXCP_NONE;
820 }
821
822 #if defined(CONFIG_USER_ONLY)
823 /* User Timers and Counters */
get_ticks(bool shift)824 static target_ulong get_ticks(bool shift)
825 {
826 int64_t val = cpu_get_host_ticks();
827 target_ulong result = shift ? val >> 32 : val;
828
829 return result;
830 }
831
read_time(CPURISCVState * env,int csrno,target_ulong * val)832 static RISCVException read_time(CPURISCVState *env, int csrno,
833 target_ulong *val)
834 {
835 *val = cpu_get_host_ticks();
836 return RISCV_EXCP_NONE;
837 }
838
read_timeh(CPURISCVState * env,int csrno,target_ulong * val)839 static RISCVException read_timeh(CPURISCVState *env, int csrno,
840 target_ulong *val)
841 {
842 *val = cpu_get_host_ticks() >> 32;
843 return RISCV_EXCP_NONE;
844 }
845
read_hpmcounter(CPURISCVState * env,int csrno,target_ulong * val)846 static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
847 target_ulong *val)
848 {
849 *val = get_ticks(false);
850 return RISCV_EXCP_NONE;
851 }
852
read_hpmcounterh(CPURISCVState * env,int csrno,target_ulong * val)853 static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
854 target_ulong *val)
855 {
856 *val = get_ticks(true);
857 return RISCV_EXCP_NONE;
858 }
859
860 #else /* CONFIG_USER_ONLY */
861
read_mcyclecfg(CPURISCVState * env,int csrno,target_ulong * val)862 static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno,
863 target_ulong *val)
864 {
865 *val = env->mcyclecfg;
866 return RISCV_EXCP_NONE;
867 }
868
write_mcyclecfg(CPURISCVState * env,int csrno,target_ulong val)869 static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
870 target_ulong val)
871 {
872 uint64_t inh_avail_mask;
873
874 if (riscv_cpu_mxl(env) == MXL_RV32) {
875 env->mcyclecfg = val;
876 } else {
877 /* Set xINH fields if priv mode supported */
878 inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MCYCLECFG_BIT_MINH;
879 inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFG_BIT_UINH : 0;
880 inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFG_BIT_SINH : 0;
881 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
882 riscv_has_ext(env, RVU)) ? MCYCLECFG_BIT_VUINH : 0;
883 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
884 riscv_has_ext(env, RVS)) ? MCYCLECFG_BIT_VSINH : 0;
885 env->mcyclecfg = val & inh_avail_mask;
886 }
887
888 return RISCV_EXCP_NONE;
889 }
890
read_mcyclecfgh(CPURISCVState * env,int csrno,target_ulong * val)891 static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno,
892 target_ulong *val)
893 {
894 *val = env->mcyclecfgh;
895 return RISCV_EXCP_NONE;
896 }
897
write_mcyclecfgh(CPURISCVState * env,int csrno,target_ulong val)898 static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno,
899 target_ulong val)
900 {
901 target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
902 MCYCLECFGH_BIT_MINH);
903
904 /* Set xINH fields if priv mode supported */
905 inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFGH_BIT_UINH : 0;
906 inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFGH_BIT_SINH : 0;
907 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
908 riscv_has_ext(env, RVU)) ? MCYCLECFGH_BIT_VUINH : 0;
909 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
910 riscv_has_ext(env, RVS)) ? MCYCLECFGH_BIT_VSINH : 0;
911
912 env->mcyclecfgh = val & inh_avail_mask;
913 return RISCV_EXCP_NONE;
914 }
915
read_minstretcfg(CPURISCVState * env,int csrno,target_ulong * val)916 static RISCVException read_minstretcfg(CPURISCVState *env, int csrno,
917 target_ulong *val)
918 {
919 *val = env->minstretcfg;
920 return RISCV_EXCP_NONE;
921 }
922
write_minstretcfg(CPURISCVState * env,int csrno,target_ulong val)923 static RISCVException write_minstretcfg(CPURISCVState *env, int csrno,
924 target_ulong val)
925 {
926 uint64_t inh_avail_mask;
927
928 if (riscv_cpu_mxl(env) == MXL_RV32) {
929 env->minstretcfg = val;
930 } else {
931 inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MINSTRETCFG_BIT_MINH;
932 inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFG_BIT_UINH : 0;
933 inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFG_BIT_SINH : 0;
934 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
935 riscv_has_ext(env, RVU)) ? MINSTRETCFG_BIT_VUINH : 0;
936 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
937 riscv_has_ext(env, RVS)) ? MINSTRETCFG_BIT_VSINH : 0;
938 env->minstretcfg = val & inh_avail_mask;
939 }
940 return RISCV_EXCP_NONE;
941 }
942
read_minstretcfgh(CPURISCVState * env,int csrno,target_ulong * val)943 static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno,
944 target_ulong *val)
945 {
946 *val = env->minstretcfgh;
947 return RISCV_EXCP_NONE;
948 }
949
write_minstretcfgh(CPURISCVState * env,int csrno,target_ulong val)950 static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno,
951 target_ulong val)
952 {
953 target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
954 MINSTRETCFGH_BIT_MINH);
955
956 inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFGH_BIT_UINH : 0;
957 inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFGH_BIT_SINH : 0;
958 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
959 riscv_has_ext(env, RVU)) ? MINSTRETCFGH_BIT_VUINH : 0;
960 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
961 riscv_has_ext(env, RVS)) ? MINSTRETCFGH_BIT_VSINH : 0;
962
963 env->minstretcfgh = val & inh_avail_mask;
964 return RISCV_EXCP_NONE;
965 }
966
read_mhpmevent(CPURISCVState * env,int csrno,target_ulong * val)967 static RISCVException read_mhpmevent(CPURISCVState *env, int csrno,
968 target_ulong *val)
969 {
970 int evt_index = csrno - CSR_MCOUNTINHIBIT;
971
972 *val = env->mhpmevent_val[evt_index];
973
974 return RISCV_EXCP_NONE;
975 }
976
write_mhpmevent(CPURISCVState * env,int csrno,target_ulong val)977 static RISCVException write_mhpmevent(CPURISCVState *env, int csrno,
978 target_ulong val)
979 {
980 int evt_index = csrno - CSR_MCOUNTINHIBIT;
981 uint64_t mhpmevt_val = val;
982 uint64_t inh_avail_mask;
983
984 if (riscv_cpu_mxl(env) == MXL_RV32) {
985 env->mhpmevent_val[evt_index] = val;
986 mhpmevt_val = mhpmevt_val |
987 ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
988 } else {
989 inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MHPMEVENT_BIT_MINH;
990 inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENT_BIT_UINH : 0;
991 inh_avail_mask |= riscv_has_ext(env, RVS) ? MHPMEVENT_BIT_SINH : 0;
992 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
993 riscv_has_ext(env, RVU)) ? MHPMEVENT_BIT_VUINH : 0;
994 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
995 riscv_has_ext(env, RVS)) ? MHPMEVENT_BIT_VSINH : 0;
996 mhpmevt_val = val & inh_avail_mask;
997 env->mhpmevent_val[evt_index] = mhpmevt_val;
998 }
999
1000 riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
1001
1002 return RISCV_EXCP_NONE;
1003 }
1004
read_mhpmeventh(CPURISCVState * env,int csrno,target_ulong * val)1005 static RISCVException read_mhpmeventh(CPURISCVState *env, int csrno,
1006 target_ulong *val)
1007 {
1008 int evt_index = csrno - CSR_MHPMEVENT3H + 3;
1009
1010 *val = env->mhpmeventh_val[evt_index];
1011
1012 return RISCV_EXCP_NONE;
1013 }
1014
write_mhpmeventh(CPURISCVState * env,int csrno,target_ulong val)1015 static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
1016 target_ulong val)
1017 {
1018 int evt_index = csrno - CSR_MHPMEVENT3H + 3;
1019 uint64_t mhpmevth_val;
1020 uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];
1021 target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
1022 MHPMEVENTH_BIT_MINH);
1023
1024 inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENTH_BIT_UINH : 0;
1025 inh_avail_mask |= riscv_has_ext(env, RVS) ? MHPMEVENTH_BIT_SINH : 0;
1026 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
1027 riscv_has_ext(env, RVU)) ? MHPMEVENTH_BIT_VUINH : 0;
1028 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
1029 riscv_has_ext(env, RVS)) ? MHPMEVENTH_BIT_VSINH : 0;
1030
1031 mhpmevth_val = val & inh_avail_mask;
1032 mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
1033 env->mhpmeventh_val[evt_index] = mhpmevth_val;
1034
1035 riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
1036
1037 return RISCV_EXCP_NONE;
1038 }
1039
riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState * env,int counter_idx,bool upper_half)1040 static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env,
1041 int counter_idx,
1042 bool upper_half)
1043 {
1044 int inst = riscv_pmu_ctr_monitor_instructions(env, counter_idx);
1045 uint64_t *counter_arr_virt = env->pmu_fixed_ctrs[inst].counter_virt;
1046 uint64_t *counter_arr = env->pmu_fixed_ctrs[inst].counter;
1047 target_ulong result = 0;
1048 uint64_t curr_val = 0;
1049 uint64_t cfg_val = 0;
1050
1051 if (counter_idx == 0) {
1052 cfg_val = upper_half ? ((uint64_t)env->mcyclecfgh << 32) :
1053 env->mcyclecfg;
1054 } else if (counter_idx == 2) {
1055 cfg_val = upper_half ? ((uint64_t)env->minstretcfgh << 32) :
1056 env->minstretcfg;
1057 } else {
1058 cfg_val = upper_half ?
1059 ((uint64_t)env->mhpmeventh_val[counter_idx] << 32) :
1060 env->mhpmevent_val[counter_idx];
1061 cfg_val &= MHPMEVENT_FILTER_MASK;
1062 }
1063
1064 if (!cfg_val) {
1065 if (icount_enabled()) {
1066 curr_val = inst ? icount_get_raw() : icount_get();
1067 } else {
1068 curr_val = cpu_get_host_ticks();
1069 }
1070
1071 goto done;
1072 }
1073
1074 /* Update counter before reading. */
1075 riscv_pmu_update_fixed_ctrs(env, env->priv, env->virt_enabled);
1076
1077 if (!(cfg_val & MCYCLECFG_BIT_MINH)) {
1078 curr_val += counter_arr[PRV_M];
1079 }
1080
1081 if (!(cfg_val & MCYCLECFG_BIT_SINH)) {
1082 curr_val += counter_arr[PRV_S];
1083 }
1084
1085 if (!(cfg_val & MCYCLECFG_BIT_UINH)) {
1086 curr_val += counter_arr[PRV_U];
1087 }
1088
1089 if (!(cfg_val & MCYCLECFG_BIT_VSINH)) {
1090 curr_val += counter_arr_virt[PRV_S];
1091 }
1092
1093 if (!(cfg_val & MCYCLECFG_BIT_VUINH)) {
1094 curr_val += counter_arr_virt[PRV_U];
1095 }
1096
1097 done:
1098 if (riscv_cpu_mxl(env) == MXL_RV32) {
1099 result = upper_half ? curr_val >> 32 : curr_val;
1100 } else {
1101 result = curr_val;
1102 }
1103
1104 return result;
1105 }
1106
write_mhpmcounter(CPURISCVState * env,int csrno,target_ulong val)1107 static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
1108 target_ulong val)
1109 {
1110 int ctr_idx = csrno - CSR_MCYCLE;
1111 PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
1112 uint64_t mhpmctr_val = val;
1113
1114 counter->mhpmcounter_val = val;
1115 if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
1116 (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
1117 riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
1118 counter->mhpmcounter_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
1119 ctr_idx, false);
1120 if (ctr_idx > 2) {
1121 if (riscv_cpu_mxl(env) == MXL_RV32) {
1122 mhpmctr_val = mhpmctr_val |
1123 ((uint64_t)counter->mhpmcounterh_val << 32);
1124 }
1125 riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
1126 }
1127 } else {
1128 /* Other counters can keep incrementing from the given value */
1129 counter->mhpmcounter_prev = val;
1130 }
1131
1132 return RISCV_EXCP_NONE;
1133 }
1134
write_mhpmcounterh(CPURISCVState * env,int csrno,target_ulong val)1135 static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
1136 target_ulong val)
1137 {
1138 int ctr_idx = csrno - CSR_MCYCLEH;
1139 PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
1140 uint64_t mhpmctr_val = counter->mhpmcounter_val;
1141 uint64_t mhpmctrh_val = val;
1142
1143 counter->mhpmcounterh_val = val;
1144 mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
1145 if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
1146 (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
1147 riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
1148 counter->mhpmcounterh_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
1149 ctr_idx, true);
1150 if (ctr_idx > 2) {
1151 riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
1152 }
1153 } else {
1154 counter->mhpmcounterh_prev = val;
1155 }
1156
1157 return RISCV_EXCP_NONE;
1158 }
1159
riscv_pmu_read_ctr(CPURISCVState * env,target_ulong * val,bool upper_half,uint32_t ctr_idx)1160 RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
1161 bool upper_half, uint32_t ctr_idx)
1162 {
1163 PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
1164 target_ulong ctr_prev = upper_half ? counter->mhpmcounterh_prev :
1165 counter->mhpmcounter_prev;
1166 target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val :
1167 counter->mhpmcounter_val;
1168
1169 if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
1170 /*
1171 * Counter should not increment if inhibit bit is set. Just return the
1172 * current counter value.
1173 */
1174 *val = ctr_val;
1175 return RISCV_EXCP_NONE;
1176 }
1177
1178 /*
1179 * The kernel computes the perf delta by subtracting the current value from
1180 * the value it initialized previously (ctr_val).
1181 */
1182 if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
1183 riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
1184 *val = riscv_pmu_ctr_get_fixed_counters_val(env, ctr_idx, upper_half) -
1185 ctr_prev + ctr_val;
1186 } else {
1187 *val = ctr_val;
1188 }
1189
1190 return RISCV_EXCP_NONE;
1191 }
1192
read_hpmcounter(CPURISCVState * env,int csrno,target_ulong * val)1193 static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
1194 target_ulong *val)
1195 {
1196 uint16_t ctr_index;
1197
1198 if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
1199 ctr_index = csrno - CSR_MCYCLE;
1200 } else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
1201 ctr_index = csrno - CSR_CYCLE;
1202 } else {
1203 return RISCV_EXCP_ILLEGAL_INST;
1204 }
1205
1206 return riscv_pmu_read_ctr(env, val, false, ctr_index);
1207 }
1208
read_hpmcounterh(CPURISCVState * env,int csrno,target_ulong * val)1209 static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
1210 target_ulong *val)
1211 {
1212 uint16_t ctr_index;
1213
1214 if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) {
1215 ctr_index = csrno - CSR_MCYCLEH;
1216 } else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) {
1217 ctr_index = csrno - CSR_CYCLEH;
1218 } else {
1219 return RISCV_EXCP_ILLEGAL_INST;
1220 }
1221
1222 return riscv_pmu_read_ctr(env, val, true, ctr_index);
1223 }
1224
read_scountovf(CPURISCVState * env,int csrno,target_ulong * val)1225 static RISCVException read_scountovf(CPURISCVState *env, int csrno,
1226 target_ulong *val)
1227 {
1228 int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT;
1229 int i;
1230 *val = 0;
1231 target_ulong *mhpm_evt_val;
1232 uint64_t of_bit_mask;
1233
1234 if (riscv_cpu_mxl(env) == MXL_RV32) {
1235 mhpm_evt_val = env->mhpmeventh_val;
1236 of_bit_mask = MHPMEVENTH_BIT_OF;
1237 } else {
1238 mhpm_evt_val = env->mhpmevent_val;
1239 of_bit_mask = MHPMEVENT_BIT_OF;
1240 }
1241
1242 for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) {
1243 if ((get_field(env->mcounteren, BIT(i))) &&
1244 (mhpm_evt_val[i] & of_bit_mask)) {
1245 *val |= BIT(i);
1246 }
1247 }
1248
1249 return RISCV_EXCP_NONE;
1250 }
1251
read_time(CPURISCVState * env,int csrno,target_ulong * val)1252 static RISCVException read_time(CPURISCVState *env, int csrno,
1253 target_ulong *val)
1254 {
1255 uint64_t delta = env->virt_enabled ? env->htimedelta : 0;
1256
1257 if (!env->rdtime_fn) {
1258 return RISCV_EXCP_ILLEGAL_INST;
1259 }
1260
1261 *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
1262 return RISCV_EXCP_NONE;
1263 }
1264
read_timeh(CPURISCVState * env,int csrno,target_ulong * val)1265 static RISCVException read_timeh(CPURISCVState *env, int csrno,
1266 target_ulong *val)
1267 {
1268 uint64_t delta = env->virt_enabled ? env->htimedelta : 0;
1269
1270 if (!env->rdtime_fn) {
1271 return RISCV_EXCP_ILLEGAL_INST;
1272 }
1273
1274 *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
1275 return RISCV_EXCP_NONE;
1276 }
1277
read_vstimecmp(CPURISCVState * env,int csrno,target_ulong * val)1278 static RISCVException read_vstimecmp(CPURISCVState *env, int csrno,
1279 target_ulong *val)
1280 {
1281 *val = env->vstimecmp;
1282
1283 return RISCV_EXCP_NONE;
1284 }
1285
read_vstimecmph(CPURISCVState * env,int csrno,target_ulong * val)1286 static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
1287 target_ulong *val)
1288 {
1289 *val = env->vstimecmp >> 32;
1290
1291 return RISCV_EXCP_NONE;
1292 }
1293
write_vstimecmp(CPURISCVState * env,int csrno,target_ulong val)1294 static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
1295 target_ulong val)
1296 {
1297 if (riscv_cpu_mxl(env) == MXL_RV32) {
1298 env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
1299 } else {
1300 env->vstimecmp = val;
1301 }
1302
1303 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
1304 env->htimedelta, MIP_VSTIP);
1305
1306 return RISCV_EXCP_NONE;
1307 }
1308
write_vstimecmph(CPURISCVState * env,int csrno,target_ulong val)1309 static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
1310 target_ulong val)
1311 {
1312 env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
1313 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
1314 env->htimedelta, MIP_VSTIP);
1315
1316 return RISCV_EXCP_NONE;
1317 }
1318
read_stimecmp(CPURISCVState * env,int csrno,target_ulong * val)1319 static RISCVException read_stimecmp(CPURISCVState *env, int csrno,
1320 target_ulong *val)
1321 {
1322 if (env->virt_enabled) {
1323 *val = env->vstimecmp;
1324 } else {
1325 *val = env->stimecmp;
1326 }
1327
1328 return RISCV_EXCP_NONE;
1329 }
1330
read_stimecmph(CPURISCVState * env,int csrno,target_ulong * val)1331 static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
1332 target_ulong *val)
1333 {
1334 if (env->virt_enabled) {
1335 *val = env->vstimecmp >> 32;
1336 } else {
1337 *val = env->stimecmp >> 32;
1338 }
1339
1340 return RISCV_EXCP_NONE;
1341 }
1342
write_stimecmp(CPURISCVState * env,int csrno,target_ulong val)1343 static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
1344 target_ulong val)
1345 {
1346 if (env->virt_enabled) {
1347 if (env->hvictl & HVICTL_VTI) {
1348 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1349 }
1350 return write_vstimecmp(env, csrno, val);
1351 }
1352
1353 if (riscv_cpu_mxl(env) == MXL_RV32) {
1354 env->stimecmp = deposit64(env->stimecmp, 0, 32, (uint64_t)val);
1355 } else {
1356 env->stimecmp = val;
1357 }
1358
1359 riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);
1360
1361 return RISCV_EXCP_NONE;
1362 }
1363
write_stimecmph(CPURISCVState * env,int csrno,target_ulong val)1364 static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
1365 target_ulong val)
1366 {
1367 if (env->virt_enabled) {
1368 if (env->hvictl & HVICTL_VTI) {
1369 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1370 }
1371 return write_vstimecmph(env, csrno, val);
1372 }
1373
1374 env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
1375 riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);
1376
1377 return RISCV_EXCP_NONE;
1378 }
1379
1380 #define VSTOPI_NUM_SRCS 5
1381
1382 /*
1383 * All core local interrupts except the fixed ones 0:12. This macro is for
1384 * virtual interrupts logic so please don't change this to avoid messing up
1385 * the whole support, For reference see AIA spec: `5.3 Interrupt filtering and
1386 * virtual interrupts for supervisor level` and `6.3.2 Virtual interrupts for
1387 * VS level`.
1388 */
1389 #define LOCAL_INTERRUPTS (~0x1FFFULL)
1390
1391 static const uint64_t delegable_ints =
1392 S_MODE_INTERRUPTS | VS_MODE_INTERRUPTS | MIP_LCOFIP;
1393 static const uint64_t vs_delegable_ints =
1394 (VS_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & ~MIP_LCOFIP;
1395 static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
1396 HS_MODE_INTERRUPTS | LOCAL_INTERRUPTS;
1397 #define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
1398 (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
1399 (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
1400 (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
1401 (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
1402 (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
1403 (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
1404 (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
1405 (1ULL << (RISCV_EXCP_U_ECALL)) | \
1406 (1ULL << (RISCV_EXCP_S_ECALL)) | \
1407 (1ULL << (RISCV_EXCP_VS_ECALL)) | \
1408 (1ULL << (RISCV_EXCP_M_ECALL)) | \
1409 (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
1410 (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
1411 (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
1412 (1ULL << (RISCV_EXCP_SW_CHECK)) | \
1413 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
1414 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
1415 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
1416 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
1417 static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
1418 ~((1ULL << (RISCV_EXCP_S_ECALL)) |
1419 (1ULL << (RISCV_EXCP_VS_ECALL)) |
1420 (1ULL << (RISCV_EXCP_M_ECALL)) |
1421 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
1422 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
1423 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
1424 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
1425 static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
1426 SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
1427 SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
1428
1429 /*
1430 * Spec allows for bits 13:63 to be either read-only or writable.
1431 * So far we have interrupt LCOFIP in that region which is writable.
1432 *
1433 * Also, spec allows to inject virtual interrupts in this region even
1434 * without any hardware interrupts for that interrupt number.
1435 *
1436 * For now interrupt in 13:63 region are all kept writable. 13 being
1437 * LCOFIP and 14:63 being virtual only. Change this in future if we
1438 * introduce more interrupts that are not writable.
1439 */
1440
1441 /* Bit STIP can be an alias of mip.STIP that's why it's writable in mvip. */
1442 static const uint64_t mvip_writable_mask = MIP_SSIP | MIP_STIP | MIP_SEIP |
1443 LOCAL_INTERRUPTS;
1444 static const uint64_t mvien_writable_mask = MIP_SSIP | MIP_SEIP |
1445 LOCAL_INTERRUPTS;
1446
1447 static const uint64_t sip_writable_mask = SIP_SSIP | LOCAL_INTERRUPTS;
1448 static const uint64_t hip_writable_mask = MIP_VSSIP;
1449 static const uint64_t hvip_writable_mask = MIP_VSSIP | MIP_VSTIP |
1450 MIP_VSEIP | LOCAL_INTERRUPTS;
1451 static const uint64_t hvien_writable_mask = LOCAL_INTERRUPTS;
1452
1453 static const uint64_t vsip_writable_mask = MIP_VSSIP | LOCAL_INTERRUPTS;
1454
1455 const bool valid_vm_1_10_32[16] = {
1456 [VM_1_10_MBARE] = true,
1457 [VM_1_10_SV32] = true
1458 };
1459
1460 const bool valid_vm_1_10_64[16] = {
1461 [VM_1_10_MBARE] = true,
1462 [VM_1_10_SV39] = true,
1463 [VM_1_10_SV48] = true,
1464 [VM_1_10_SV57] = true
1465 };
1466
1467 /* Machine Information Registers */
read_zero(CPURISCVState * env,int csrno,target_ulong * val)1468 static RISCVException read_zero(CPURISCVState *env, int csrno,
1469 target_ulong *val)
1470 {
1471 *val = 0;
1472 return RISCV_EXCP_NONE;
1473 }
1474
write_ignore(CPURISCVState * env,int csrno,target_ulong val)1475 static RISCVException write_ignore(CPURISCVState *env, int csrno,
1476 target_ulong val)
1477 {
1478 return RISCV_EXCP_NONE;
1479 }
1480
read_mvendorid(CPURISCVState * env,int csrno,target_ulong * val)1481 static RISCVException read_mvendorid(CPURISCVState *env, int csrno,
1482 target_ulong *val)
1483 {
1484 *val = riscv_cpu_cfg(env)->mvendorid;
1485 return RISCV_EXCP_NONE;
1486 }
1487
read_marchid(CPURISCVState * env,int csrno,target_ulong * val)1488 static RISCVException read_marchid(CPURISCVState *env, int csrno,
1489 target_ulong *val)
1490 {
1491 *val = riscv_cpu_cfg(env)->marchid;
1492 return RISCV_EXCP_NONE;
1493 }
1494
read_mimpid(CPURISCVState * env,int csrno,target_ulong * val)1495 static RISCVException read_mimpid(CPURISCVState *env, int csrno,
1496 target_ulong *val)
1497 {
1498 *val = riscv_cpu_cfg(env)->mimpid;
1499 return RISCV_EXCP_NONE;
1500 }
1501
read_mhartid(CPURISCVState * env,int csrno,target_ulong * val)1502 static RISCVException read_mhartid(CPURISCVState *env, int csrno,
1503 target_ulong *val)
1504 {
1505 *val = env->mhartid;
1506 return RISCV_EXCP_NONE;
1507 }
1508
1509 /* Machine Trap Setup */
1510
1511 /* We do not store SD explicitly, only compute it on demand. */
add_status_sd(RISCVMXL xl,uint64_t status)1512 static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
1513 {
1514 if ((status & MSTATUS_FS) == MSTATUS_FS ||
1515 (status & MSTATUS_VS) == MSTATUS_VS ||
1516 (status & MSTATUS_XS) == MSTATUS_XS) {
1517 switch (xl) {
1518 case MXL_RV32:
1519 return status | MSTATUS32_SD;
1520 case MXL_RV64:
1521 return status | MSTATUS64_SD;
1522 case MXL_RV128:
1523 return MSTATUSH128_SD;
1524 default:
1525 g_assert_not_reached();
1526 }
1527 }
1528 return status;
1529 }
1530
read_mstatus(CPURISCVState * env,int csrno,target_ulong * val)1531 static RISCVException read_mstatus(CPURISCVState *env, int csrno,
1532 target_ulong *val)
1533 {
1534 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
1535 return RISCV_EXCP_NONE;
1536 }
1537
validate_vm(CPURISCVState * env,target_ulong vm)1538 static bool validate_vm(CPURISCVState *env, target_ulong vm)
1539 {
1540 uint64_t mode_supported = riscv_cpu_cfg(env)->satp_mode.map;
1541 return get_field(mode_supported, (1 << vm));
1542 }
1543
legalize_xatp(CPURISCVState * env,target_ulong old_xatp,target_ulong val)1544 static target_ulong legalize_xatp(CPURISCVState *env, target_ulong old_xatp,
1545 target_ulong val)
1546 {
1547 target_ulong mask;
1548 bool vm;
1549 if (riscv_cpu_mxl(env) == MXL_RV32) {
1550 vm = validate_vm(env, get_field(val, SATP32_MODE));
1551 mask = (val ^ old_xatp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
1552 } else {
1553 vm = validate_vm(env, get_field(val, SATP64_MODE));
1554 mask = (val ^ old_xatp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
1555 }
1556
1557 if (vm && mask) {
1558 /*
1559 * The ISA defines SATP.MODE=Bare as "no translation", but we still
1560 * pass these through QEMU's TLB emulation as it improves
1561 * performance. Flushing the TLB on SATP writes with paging
1562 * enabled avoids leaking those invalid cached mappings.
1563 */
1564 tlb_flush(env_cpu(env));
1565 return val;
1566 }
1567 return old_xatp;
1568 }
1569
legalize_mpp(CPURISCVState * env,target_ulong old_mpp,target_ulong val)1570 static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp,
1571 target_ulong val)
1572 {
1573 bool valid = false;
1574 target_ulong new_mpp = get_field(val, MSTATUS_MPP);
1575
1576 switch (new_mpp) {
1577 case PRV_M:
1578 valid = true;
1579 break;
1580 case PRV_S:
1581 valid = riscv_has_ext(env, RVS);
1582 break;
1583 case PRV_U:
1584 valid = riscv_has_ext(env, RVU);
1585 break;
1586 }
1587
1588 /* Remain field unchanged if new_mpp value is invalid */
1589 if (!valid) {
1590 val = set_field(val, MSTATUS_MPP, old_mpp);
1591 }
1592
1593 return val;
1594 }
1595
write_mstatus(CPURISCVState * env,int csrno,target_ulong val)1596 static RISCVException write_mstatus(CPURISCVState *env, int csrno,
1597 target_ulong val)
1598 {
1599 uint64_t mstatus = env->mstatus;
1600 uint64_t mask = 0;
1601 RISCVMXL xl = riscv_cpu_mxl(env);
1602
1603 /*
1604 * MPP field have been made WARL since priv version 1.11. However,
1605 * legalization for it will not break any software running on 1.10.
1606 */
1607 val = legalize_mpp(env, get_field(mstatus, MSTATUS_MPP), val);
1608
1609 /* flush tlb on mstatus fields that affect VM */
1610 if ((val ^ mstatus) & MSTATUS_MXR) {
1611 tlb_flush(env_cpu(env));
1612 }
1613 mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
1614 MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
1615 MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
1616 MSTATUS_TW;
1617
1618 if (riscv_has_ext(env, RVF)) {
1619 mask |= MSTATUS_FS;
1620 }
1621 if (riscv_has_ext(env, RVV)) {
1622 mask |= MSTATUS_VS;
1623 }
1624
1625 if (xl != MXL_RV32 || env->debugger) {
1626 if (riscv_has_ext(env, RVH)) {
1627 mask |= MSTATUS_MPV | MSTATUS_GVA;
1628 }
1629 if ((val & MSTATUS64_UXL) != 0) {
1630 mask |= MSTATUS64_UXL;
1631 }
1632 }
1633
1634 /* If cfi lp extension is available, then apply cfi lp mask */
1635 if (env_archcpu(env)->cfg.ext_zicfilp) {
1636 mask |= (MSTATUS_MPELP | MSTATUS_SPELP);
1637 }
1638
1639 mstatus = (mstatus & ~mask) | (val & mask);
1640
1641 env->mstatus = mstatus;
1642
1643 /*
1644 * Except in debug mode, UXL/SXL can only be modified by higher
1645 * privilege mode. So xl will not be changed in normal mode.
1646 */
1647 if (env->debugger) {
1648 env->xl = cpu_recompute_xl(env);
1649 }
1650
1651 riscv_cpu_update_mask(env);
1652 return RISCV_EXCP_NONE;
1653 }
1654
read_mstatush(CPURISCVState * env,int csrno,target_ulong * val)1655 static RISCVException read_mstatush(CPURISCVState *env, int csrno,
1656 target_ulong *val)
1657 {
1658 *val = env->mstatus >> 32;
1659 return RISCV_EXCP_NONE;
1660 }
1661
write_mstatush(CPURISCVState * env,int csrno,target_ulong val)1662 static RISCVException write_mstatush(CPURISCVState *env, int csrno,
1663 target_ulong val)
1664 {
1665 uint64_t valh = (uint64_t)val << 32;
1666 uint64_t mask = riscv_has_ext(env, RVH) ? MSTATUS_MPV | MSTATUS_GVA : 0;
1667
1668 env->mstatus = (env->mstatus & ~mask) | (valh & mask);
1669
1670 return RISCV_EXCP_NONE;
1671 }
1672
read_mstatus_i128(CPURISCVState * env,int csrno,Int128 * val)1673 static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
1674 Int128 *val)
1675 {
1676 *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128,
1677 env->mstatus));
1678 return RISCV_EXCP_NONE;
1679 }
1680
read_misa_i128(CPURISCVState * env,int csrno,Int128 * val)1681 static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
1682 Int128 *val)
1683 {
1684 *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
1685 return RISCV_EXCP_NONE;
1686 }
1687
read_misa(CPURISCVState * env,int csrno,target_ulong * val)1688 static RISCVException read_misa(CPURISCVState *env, int csrno,
1689 target_ulong *val)
1690 {
1691 target_ulong misa;
1692
1693 switch (env->misa_mxl) {
1694 case MXL_RV32:
1695 misa = (target_ulong)MXL_RV32 << 30;
1696 break;
1697 #ifdef TARGET_RISCV64
1698 case MXL_RV64:
1699 misa = (target_ulong)MXL_RV64 << 62;
1700 break;
1701 #endif
1702 default:
1703 g_assert_not_reached();
1704 }
1705
1706 *val = misa | env->misa_ext;
1707 return RISCV_EXCP_NONE;
1708 }
1709
write_misa(CPURISCVState * env,int csrno,target_ulong val)1710 static RISCVException write_misa(CPURISCVState *env, int csrno,
1711 target_ulong val)
1712 {
1713 RISCVCPU *cpu = env_archcpu(env);
1714 uint32_t orig_misa_ext = env->misa_ext;
1715 Error *local_err = NULL;
1716
1717 if (!riscv_cpu_cfg(env)->misa_w) {
1718 /* drop write to misa */
1719 return RISCV_EXCP_NONE;
1720 }
1721
1722 /* Mask extensions that are not supported by this hart */
1723 val &= env->misa_ext_mask;
1724
1725 /*
1726 * Suppress 'C' if next instruction is not aligned
1727 * TODO: this should check next_pc
1728 */
1729 if ((val & RVC) && (GETPC() & ~3) != 0) {
1730 val &= ~RVC;
1731 }
1732
1733 /* Disable RVG if any of its dependencies are disabled */
1734 if (!(val & RVI && val & RVM && val & RVA &&
1735 val & RVF && val & RVD)) {
1736 val &= ~RVG;
1737 }
1738
1739 /* If nothing changed, do nothing. */
1740 if (val == env->misa_ext) {
1741 return RISCV_EXCP_NONE;
1742 }
1743
1744 env->misa_ext = val;
1745 riscv_cpu_validate_set_extensions(cpu, &local_err);
1746 if (local_err != NULL) {
1747 /* Rollback on validation error */
1748 qemu_log_mask(LOG_GUEST_ERROR, "Unable to write MISA ext value "
1749 "0x%x, keeping existing MISA ext 0x%x\n",
1750 env->misa_ext, orig_misa_ext);
1751
1752 env->misa_ext = orig_misa_ext;
1753
1754 return RISCV_EXCP_NONE;
1755 }
1756
1757 if (!(env->misa_ext & RVF)) {
1758 env->mstatus &= ~MSTATUS_FS;
1759 }
1760
1761 /* flush translation cache */
1762 tb_flush(env_cpu(env));
1763 env->xl = riscv_cpu_mxl(env);
1764 return RISCV_EXCP_NONE;
1765 }
1766
read_medeleg(CPURISCVState * env,int csrno,target_ulong * val)1767 static RISCVException read_medeleg(CPURISCVState *env, int csrno,
1768 target_ulong *val)
1769 {
1770 *val = env->medeleg;
1771 return RISCV_EXCP_NONE;
1772 }
1773
write_medeleg(CPURISCVState * env,int csrno,target_ulong val)1774 static RISCVException write_medeleg(CPURISCVState *env, int csrno,
1775 target_ulong val)
1776 {
1777 env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
1778 return RISCV_EXCP_NONE;
1779 }
1780
rmw_mideleg64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)1781 static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno,
1782 uint64_t *ret_val,
1783 uint64_t new_val, uint64_t wr_mask)
1784 {
1785 uint64_t mask = wr_mask & delegable_ints;
1786
1787 if (ret_val) {
1788 *ret_val = env->mideleg;
1789 }
1790
1791 env->mideleg = (env->mideleg & ~mask) | (new_val & mask);
1792
1793 if (riscv_has_ext(env, RVH)) {
1794 env->mideleg |= HS_MODE_INTERRUPTS;
1795 }
1796
1797 return RISCV_EXCP_NONE;
1798 }
1799
rmw_mideleg(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)1800 static RISCVException rmw_mideleg(CPURISCVState *env, int csrno,
1801 target_ulong *ret_val,
1802 target_ulong new_val, target_ulong wr_mask)
1803 {
1804 uint64_t rval;
1805 RISCVException ret;
1806
1807 ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask);
1808 if (ret_val) {
1809 *ret_val = rval;
1810 }
1811
1812 return ret;
1813 }
1814
rmw_midelegh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)1815 static RISCVException rmw_midelegh(CPURISCVState *env, int csrno,
1816 target_ulong *ret_val,
1817 target_ulong new_val,
1818 target_ulong wr_mask)
1819 {
1820 uint64_t rval;
1821 RISCVException ret;
1822
1823 ret = rmw_mideleg64(env, csrno, &rval,
1824 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1825 if (ret_val) {
1826 *ret_val = rval >> 32;
1827 }
1828
1829 return ret;
1830 }
1831
rmw_mie64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)1832 static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
1833 uint64_t *ret_val,
1834 uint64_t new_val, uint64_t wr_mask)
1835 {
1836 uint64_t mask = wr_mask & all_ints;
1837
1838 if (ret_val) {
1839 *ret_val = env->mie;
1840 }
1841
1842 env->mie = (env->mie & ~mask) | (new_val & mask);
1843
1844 if (!riscv_has_ext(env, RVH)) {
1845 env->mie &= ~((uint64_t)HS_MODE_INTERRUPTS);
1846 }
1847
1848 return RISCV_EXCP_NONE;
1849 }
1850
rmw_mie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)1851 static RISCVException rmw_mie(CPURISCVState *env, int csrno,
1852 target_ulong *ret_val,
1853 target_ulong new_val, target_ulong wr_mask)
1854 {
1855 uint64_t rval;
1856 RISCVException ret;
1857
1858 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask);
1859 if (ret_val) {
1860 *ret_val = rval;
1861 }
1862
1863 return ret;
1864 }
1865
rmw_mieh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)1866 static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
1867 target_ulong *ret_val,
1868 target_ulong new_val, target_ulong wr_mask)
1869 {
1870 uint64_t rval;
1871 RISCVException ret;
1872
1873 ret = rmw_mie64(env, csrno, &rval,
1874 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1875 if (ret_val) {
1876 *ret_val = rval >> 32;
1877 }
1878
1879 return ret;
1880 }
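
/*
 * The rmw_*h helpers above (and the ones that follow) all use the same RV32
 * pattern: the guest's 32-bit value and write mask are shifted into bits
 * 63:32 of the 64-bit backing register and the returned value is shifted
 * back down. A minimal sketch for mieh (illustrative only):
 *
 *   uint64_t rval;
 *   rmw_mie64(env, csrno, &rval,
 *             (uint64_t)new_val << 32,   // only touches bits 63:32
 *             (uint64_t)wr_mask << 32);
 *   *ret_val = rval >> 32;               // guest sees the old bits 63:32
 */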
1881
rmw_mvien64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)1882 static RISCVException rmw_mvien64(CPURISCVState *env, int csrno,
1883 uint64_t *ret_val,
1884 uint64_t new_val, uint64_t wr_mask)
1885 {
1886 uint64_t mask = wr_mask & mvien_writable_mask;
1887
1888 if (ret_val) {
1889 *ret_val = env->mvien;
1890 }
1891
1892 env->mvien = (env->mvien & ~mask) | (new_val & mask);
1893
1894 return RISCV_EXCP_NONE;
1895 }
1896
rmw_mvien(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)1897 static RISCVException rmw_mvien(CPURISCVState *env, int csrno,
1898 target_ulong *ret_val,
1899 target_ulong new_val, target_ulong wr_mask)
1900 {
1901 uint64_t rval;
1902 RISCVException ret;
1903
1904 ret = rmw_mvien64(env, csrno, &rval, new_val, wr_mask);
1905 if (ret_val) {
1906 *ret_val = rval;
1907 }
1908
1909 return ret;
1910 }
1911
rmw_mvienh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)1912 static RISCVException rmw_mvienh(CPURISCVState *env, int csrno,
1913 target_ulong *ret_val,
1914 target_ulong new_val, target_ulong wr_mask)
1915 {
1916 uint64_t rval;
1917 RISCVException ret;
1918
1919 ret = rmw_mvien64(env, csrno, &rval,
1920 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1921 if (ret_val) {
1922 *ret_val = rval >> 32;
1923 }
1924
1925 return ret;
1926 }
1927
read_mtopi(CPURISCVState * env,int csrno,target_ulong * val)1928 static RISCVException read_mtopi(CPURISCVState *env, int csrno,
1929 target_ulong *val)
1930 {
1931 int irq;
1932 uint8_t iprio;
1933
1934 irq = riscv_cpu_mirq_pending(env);
1935 if (irq <= 0 || irq > 63) {
1936 *val = 0;
1937 } else {
1938 iprio = env->miprio[irq];
1939 if (!iprio) {
1940 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) {
1941 iprio = IPRIO_MMAXIPRIO;
1942 }
1943 }
1944 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
1945 *val |= iprio;
1946 }
1947
1948 return RISCV_EXCP_NONE;
1949 }
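
/*
 * Illustrative example of the mtopi encoding above (not a statement about a
 * particular board): if IRQ_M_EXT is the highest-priority pending M-level
 * interrupt, miprio[IRQ_M_EXT] is 0 and its default priority is at or below
 * IPRIO_DEFAULT_M, the returned value is
 * (IRQ_M_EXT & TOPI_IID_MASK) << TOPI_IID_SHIFT with an IPRIO field of 0;
 * had the default priority been above IPRIO_DEFAULT_M, the IPRIO field would
 * read IPRIO_MMAXIPRIO instead.
 */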
1950
aia_xlate_vs_csrno(CPURISCVState * env,int csrno)1951 static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
1952 {
1953 if (!env->virt_enabled) {
1954 return csrno;
1955 }
1956
1957 switch (csrno) {
1958 case CSR_SISELECT:
1959 return CSR_VSISELECT;
1960 case CSR_SIREG:
1961 return CSR_VSIREG;
1962 case CSR_STOPEI:
1963 return CSR_VSTOPEI;
1964 default:
1965 return csrno;
1966 }
1967 }
1968
rmw_xiselect(CPURISCVState * env,int csrno,target_ulong * val,target_ulong new_val,target_ulong wr_mask)1969 static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
1970 target_ulong *val, target_ulong new_val,
1971 target_ulong wr_mask)
1972 {
1973 target_ulong *iselect;
1974
1975 /* Translate CSR number for VS-mode */
1976 csrno = aia_xlate_vs_csrno(env, csrno);
1977
1978 /* Find the iselect CSR based on CSR number */
1979 switch (csrno) {
1980 case CSR_MISELECT:
1981 iselect = &env->miselect;
1982 break;
1983 case CSR_SISELECT:
1984 iselect = &env->siselect;
1985 break;
1986 case CSR_VSISELECT:
1987 iselect = &env->vsiselect;
1988 break;
1989 default:
1990 return RISCV_EXCP_ILLEGAL_INST;
1991 }
1992
1993 if (val) {
1994 *val = *iselect;
1995 }
1996
1997 wr_mask &= ISELECT_MASK;
1998 if (wr_mask) {
1999 *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
2000 }
2001
2002 return RISCV_EXCP_NONE;
2003 }
2004
rmw_iprio(target_ulong xlen,target_ulong iselect,uint8_t * iprio,target_ulong * val,target_ulong new_val,target_ulong wr_mask,int ext_irq_no)2005 static int rmw_iprio(target_ulong xlen,
2006 target_ulong iselect, uint8_t *iprio,
2007 target_ulong *val, target_ulong new_val,
2008 target_ulong wr_mask, int ext_irq_no)
2009 {
2010 int i, firq, nirqs;
2011 target_ulong old_val;
2012
2013 if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) {
2014 return -EINVAL;
2015 }
2016 if (xlen != 32 && iselect & 0x1) {
2017 return -EINVAL;
2018 }
2019
2020 nirqs = 4 * (xlen / 32);
2021 firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs);
2022
2023 old_val = 0;
2024 for (i = 0; i < nirqs; i++) {
2025 old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i);
2026 }
2027
2028 if (val) {
2029 *val = old_val;
2030 }
2031
2032 if (wr_mask) {
2033 new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
2034 for (i = 0; i < nirqs; i++) {
2035 /*
2036 * The M-level and S-level external IRQ priority is always read-only
2037 * zero. This means the default priority order is always used
2038 * for M-level and S-level external IRQs.
2039 */
2040 if ((firq + i) == ext_irq_no) {
2041 continue;
2042 }
2043 iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff;
2044 }
2045 }
2046
2047 return 0;
2048 }
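
/*
 * Worked example for the indexing above (illustrative, assuming RV64):
 * nirqs = 4 * (64 / 32) = 8, so each even-numbered iprio register covers
 * eight interrupts at IPRIO_IRQ_BITS bits of priority each. For
 * iselect == ISELECT_IPRIO2, firq = ((ISELECT_IPRIO2 - ISELECT_IPRIO0) / 2)
 * * 8 = 8, i.e. the register aliases iprio[8]..iprio[15]; odd iselect
 * values are rejected on RV64 because the whole group already fits in one
 * 64-bit register.
 */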
2049
rmw_xireg(CPURISCVState * env,int csrno,target_ulong * val,target_ulong new_val,target_ulong wr_mask)2050 static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
2051 target_ulong *val, target_ulong new_val,
2052 target_ulong wr_mask)
2053 {
2054 bool virt, isel_reserved;
2055 uint8_t *iprio;
2056 int ret = -EINVAL;
2057 target_ulong priv, isel, vgein;
2058
2059 /* Translate CSR number for VS-mode */
2060 csrno = aia_xlate_vs_csrno(env, csrno);
2061
2062 /* Decode register details from CSR number */
2063 virt = false;
2064 isel_reserved = false;
2065 switch (csrno) {
2066 case CSR_MIREG:
2067 iprio = env->miprio;
2068 isel = env->miselect;
2069 priv = PRV_M;
2070 break;
2071 case CSR_SIREG:
2072 if (env->priv == PRV_S && env->mvien & MIP_SEIP &&
2073 env->siselect >= ISELECT_IMSIC_EIDELIVERY &&
2074 env->siselect <= ISELECT_IMSIC_EIE63) {
2075 goto done;
2076 }
2077 iprio = env->siprio;
2078 isel = env->siselect;
2079 priv = PRV_S;
2080 break;
2081 case CSR_VSIREG:
2082 iprio = env->hviprio;
2083 isel = env->vsiselect;
2084 priv = PRV_S;
2085 virt = true;
2086 break;
2087 default:
2088 goto done;
2089 }
2090
2091 /* Find the selected guest interrupt file */
2092 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
2093
2094 if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) {
2095 /* Local interrupt priority registers not available for VS-mode */
2096 if (!virt) {
2097 ret = rmw_iprio(riscv_cpu_mxl_bits(env),
2098 isel, iprio, val, new_val, wr_mask,
2099 (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT);
2100 }
2101 } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) {
2102 /* IMSIC registers are only available when the machine implements an IMSIC. */
2103 if (env->aia_ireg_rmw_fn[priv]) {
2104 /* Selected guest interrupt file should not be zero */
2105 if (virt && (!vgein || env->geilen < vgein)) {
2106 goto done;
2107 }
2108 /* Call machine specific IMSIC register emulation */
2109 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
2110 AIA_MAKE_IREG(isel, priv, virt, vgein,
2111 riscv_cpu_mxl_bits(env)),
2112 val, new_val, wr_mask);
2113 }
2114 } else {
2115 isel_reserved = true;
2116 }
2117
2118 done:
2119 if (ret) {
2120 return (env->virt_enabled && virt && !isel_reserved) ?
2121 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
2122 }
2123 return RISCV_EXCP_NONE;
2124 }
2125
rmw_xtopei(CPURISCVState * env,int csrno,target_ulong * val,target_ulong new_val,target_ulong wr_mask)2126 static RISCVException rmw_xtopei(CPURISCVState *env, int csrno,
2127 target_ulong *val, target_ulong new_val,
2128 target_ulong wr_mask)
2129 {
2130 bool virt;
2131 int ret = -EINVAL;
2132 target_ulong priv, vgein;
2133
2134 /* Translate CSR number for VS-mode */
2135 csrno = aia_xlate_vs_csrno(env, csrno);
2136
2137 /* Decode register details from CSR number */
2138 virt = false;
2139 switch (csrno) {
2140 case CSR_MTOPEI:
2141 priv = PRV_M;
2142 break;
2143 case CSR_STOPEI:
2144 if (env->mvien & MIP_SEIP && env->priv == PRV_S) {
2145 goto done;
2146 }
2147 priv = PRV_S;
2148 break;
2149 case CSR_VSTOPEI:
2150 priv = PRV_S;
2151 virt = true;
2152 break;
2153 default:
2154 goto done;
2155 }
2156
2157 /* IMSIC CSRs are only available when the machine implements an IMSIC. */
2158 if (!env->aia_ireg_rmw_fn[priv]) {
2159 goto done;
2160 }
2161
2162 /* Find the selected guest interrupt file */
2163 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
2164
2165 /* Selected guest interrupt file should be valid */
2166 if (virt && (!vgein || env->geilen < vgein)) {
2167 goto done;
2168 }
2169
2170 /* Call machine specific IMSIC register emulation for TOPEI */
2171 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
2172 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein,
2173 riscv_cpu_mxl_bits(env)),
2174 val, new_val, wr_mask);
2175
2176 done:
2177 if (ret) {
2178 return (env->virt_enabled && virt) ?
2179 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
2180 }
2181 return RISCV_EXCP_NONE;
2182 }
2183
read_mtvec(CPURISCVState * env,int csrno,target_ulong * val)2184 static RISCVException read_mtvec(CPURISCVState *env, int csrno,
2185 target_ulong *val)
2186 {
2187 *val = env->mtvec;
2188 return RISCV_EXCP_NONE;
2189 }
2190
write_mtvec(CPURISCVState * env,int csrno,target_ulong val)2191 static RISCVException write_mtvec(CPURISCVState *env, int csrno,
2192 target_ulong val)
2193 {
2194 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
2195 if ((val & 3) < 2) {
2196 env->mtvec = val;
2197 } else {
2198 qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n");
2199 }
2200 return RISCV_EXCP_NONE;
2201 }
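
/*
 * Illustrative example for the check above: writing 0x80000001 selects
 * vectored mode with base 0x80000000 and is accepted because
 * (val & 3) == 1, whereas 0x80000002 or 0x80000003 request a reserved mode
 * and the write is dropped with the LOG_UNIMP message.
 */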
2202
read_mcountinhibit(CPURISCVState * env,int csrno,target_ulong * val)2203 static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
2204 target_ulong *val)
2205 {
2206 *val = env->mcountinhibit;
2207 return RISCV_EXCP_NONE;
2208 }
2209
write_mcountinhibit(CPURISCVState * env,int csrno,target_ulong val)2210 static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
2211 target_ulong val)
2212 {
2213 int cidx;
2214 PMUCTRState *counter;
2215 RISCVCPU *cpu = env_archcpu(env);
2216 uint32_t present_ctrs = cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR;
2217 target_ulong updated_ctrs = (env->mcountinhibit ^ val) & present_ctrs;
2218 uint64_t mhpmctr_val, prev_count, curr_count;
2219
2220 /* WARL register - disable unavailable counters; TM bit is always 0 */
2221 env->mcountinhibit = val & present_ctrs;
2222
2223 /* Check if any other counter is also monitoring cycles/instructions */
2224 for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
2225 if (!(updated_ctrs & BIT(cidx)) ||
2226 (!riscv_pmu_ctr_monitor_cycles(env, cidx) &&
2227 !riscv_pmu_ctr_monitor_instructions(env, cidx))) {
2228 continue;
2229 }
2230
2231 counter = &env->pmu_ctrs[cidx];
2232
2233 if (!get_field(env->mcountinhibit, BIT(cidx))) {
2234 counter->mhpmcounter_prev =
2235 riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);
2236 if (riscv_cpu_mxl(env) == MXL_RV32) {
2237 counter->mhpmcounterh_prev =
2238 riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);
2239 }
2240
2241 if (cidx > 2) {
2242 mhpmctr_val = counter->mhpmcounter_val;
2243 if (riscv_cpu_mxl(env) == MXL_RV32) {
2244 mhpmctr_val = mhpmctr_val |
2245 ((uint64_t)counter->mhpmcounterh_val << 32);
2246 }
2247 riscv_pmu_setup_timer(env, mhpmctr_val, cidx);
2248 }
2249 } else {
2250 curr_count = riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);
2251
2252 mhpmctr_val = counter->mhpmcounter_val;
2253 prev_count = counter->mhpmcounter_prev;
2254 if (riscv_cpu_mxl(env) == MXL_RV32) {
2255 uint64_t tmp =
2256 riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);
2257
2258 curr_count = curr_count | (tmp << 32);
2259 mhpmctr_val = mhpmctr_val |
2260 ((uint64_t)counter->mhpmcounterh_val << 32);
2261 prev_count = prev_count |
2262 ((uint64_t)counter->mhpmcounterh_prev << 32);
2263 }
2264
2265 /* Adjust the counter for later reads. */
2266 mhpmctr_val = curr_count - prev_count + mhpmctr_val;
2267 counter->mhpmcounter_val = mhpmctr_val;
2268 if (riscv_cpu_mxl(env) == MXL_RV32) {
2269 counter->mhpmcounterh_val = mhpmctr_val >> 32;
2270 }
2271 }
2272 }
2273
2274 return RISCV_EXCP_NONE;
2275 }
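
/*
 * Illustrative numbers for the accounting above (a sketch, not data from a
 * real run): suppose a counter is inhibited with mhpmcounter_val == 200
 * while mhpmcounter_prev == 1000 and the fixed counter now reads 1500.
 * The code folds the delta in, 1500 - 1000 + 200 = 700, so later guest
 * reads see 700 and stay monotonic; when counting is re-enabled a fresh
 * snapshot is taken into mhpmcounter_prev. On RV32 the same arithmetic
 * runs on the 64-bit value rebuilt from the low/high halves.
 */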
2276
read_mcounteren(CPURISCVState * env,int csrno,target_ulong * val)2277 static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
2278 target_ulong *val)
2279 {
2280 *val = env->mcounteren;
2281 return RISCV_EXCP_NONE;
2282 }
2283
write_mcounteren(CPURISCVState * env,int csrno,target_ulong val)2284 static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
2285 target_ulong val)
2286 {
2287 RISCVCPU *cpu = env_archcpu(env);
2288
2289 /* WARL register - disable unavailable counters */
2290 env->mcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
2291 COUNTEREN_IR);
2292 return RISCV_EXCP_NONE;
2293 }
2294
2295 /* Machine Trap Handling */
read_mscratch_i128(CPURISCVState * env,int csrno,Int128 * val)2296 static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
2297 Int128 *val)
2298 {
2299 *val = int128_make128(env->mscratch, env->mscratchh);
2300 return RISCV_EXCP_NONE;
2301 }
2302
write_mscratch_i128(CPURISCVState * env,int csrno,Int128 val)2303 static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
2304 Int128 val)
2305 {
2306 env->mscratch = int128_getlo(val);
2307 env->mscratchh = int128_gethi(val);
2308 return RISCV_EXCP_NONE;
2309 }
2310
read_mscratch(CPURISCVState * env,int csrno,target_ulong * val)2311 static RISCVException read_mscratch(CPURISCVState *env, int csrno,
2312 target_ulong *val)
2313 {
2314 *val = env->mscratch;
2315 return RISCV_EXCP_NONE;
2316 }
2317
write_mscratch(CPURISCVState * env,int csrno,target_ulong val)2318 static RISCVException write_mscratch(CPURISCVState *env, int csrno,
2319 target_ulong val)
2320 {
2321 env->mscratch = val;
2322 return RISCV_EXCP_NONE;
2323 }
2324
read_mepc(CPURISCVState * env,int csrno,target_ulong * val)2325 static RISCVException read_mepc(CPURISCVState *env, int csrno,
2326 target_ulong *val)
2327 {
2328 *val = env->mepc;
2329 return RISCV_EXCP_NONE;
2330 }
2331
write_mepc(CPURISCVState * env,int csrno,target_ulong val)2332 static RISCVException write_mepc(CPURISCVState *env, int csrno,
2333 target_ulong val)
2334 {
2335 env->mepc = val;
2336 return RISCV_EXCP_NONE;
2337 }
2338
read_mcause(CPURISCVState * env,int csrno,target_ulong * val)2339 static RISCVException read_mcause(CPURISCVState *env, int csrno,
2340 target_ulong *val)
2341 {
2342 *val = env->mcause;
2343 return RISCV_EXCP_NONE;
2344 }
2345
write_mcause(CPURISCVState * env,int csrno,target_ulong val)2346 static RISCVException write_mcause(CPURISCVState *env, int csrno,
2347 target_ulong val)
2348 {
2349 env->mcause = val;
2350 return RISCV_EXCP_NONE;
2351 }
2352
read_mtval(CPURISCVState * env,int csrno,target_ulong * val)2353 static RISCVException read_mtval(CPURISCVState *env, int csrno,
2354 target_ulong *val)
2355 {
2356 *val = env->mtval;
2357 return RISCV_EXCP_NONE;
2358 }
2359
write_mtval(CPURISCVState * env,int csrno,target_ulong val)2360 static RISCVException write_mtval(CPURISCVState *env, int csrno,
2361 target_ulong val)
2362 {
2363 env->mtval = val;
2364 return RISCV_EXCP_NONE;
2365 }
2366
2367 /* Execution environment configuration setup */
read_menvcfg(CPURISCVState * env,int csrno,target_ulong * val)2368 static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
2369 target_ulong *val)
2370 {
2371 *val = env->menvcfg;
2372 return RISCV_EXCP_NONE;
2373 }
2374
write_menvcfg(CPURISCVState * env,int csrno,target_ulong val)2375 static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
2376 target_ulong val)
2377 {
2378 const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
2379 uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE;
2380
2381 if (riscv_cpu_mxl(env) == MXL_RV64) {
2382 mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
2383 (cfg->ext_sstc ? MENVCFG_STCE : 0) |
2384 (cfg->ext_svadu ? MENVCFG_ADUE : 0);
2385
2386 if (env_archcpu(env)->cfg.ext_zicfilp) {
2387 mask |= MENVCFG_LPE;
2388 }
2389
2390 if (env_archcpu(env)->cfg.ext_zicfiss) {
2391 mask |= MENVCFG_SSE;
2392 }
2393 }
2394 env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
2395
2396 return RISCV_EXCP_NONE;
2397 }
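
/*
 * Illustrative example of the WARL masking above (assuming an RV64 hart
 * with Svpbmt and Sstc enabled, Svadu disabled, and neither Zicfilp nor
 * Zicfiss present): the writable mask is
 * FIOM | CBIE | CBCFE | CBZE | PBMTE | STCE, so a guest write of all-ones
 * sets those bits but leaves MENVCFG_ADUE and every other unimplemented
 * bit at zero.
 */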
2398
read_menvcfgh(CPURISCVState * env,int csrno,target_ulong * val)2399 static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
2400 target_ulong *val)
2401 {
2402 *val = env->menvcfg >> 32;
2403 return RISCV_EXCP_NONE;
2404 }
2405
write_menvcfgh(CPURISCVState * env,int csrno,target_ulong val)2406 static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
2407 target_ulong val)
2408 {
2409 const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
2410 uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
2411 (cfg->ext_sstc ? MENVCFG_STCE : 0) |
2412 (cfg->ext_svadu ? MENVCFG_ADUE : 0);
2413 uint64_t valh = (uint64_t)val << 32;
2414
2415 env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
2416
2417 return RISCV_EXCP_NONE;
2418 }
2419
read_senvcfg(CPURISCVState * env,int csrno,target_ulong * val)2420 static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
2421 target_ulong *val)
2422 {
2423 RISCVException ret;
2424
2425 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2426 if (ret != RISCV_EXCP_NONE) {
2427 return ret;
2428 }
2429
2430 *val = env->senvcfg;
2431 return RISCV_EXCP_NONE;
2432 }
2433
write_senvcfg(CPURISCVState * env,int csrno,target_ulong val)2434 static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
2435 target_ulong val)
2436 {
2437 uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
2438 RISCVException ret;
2439
2440 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2441 if (ret != RISCV_EXCP_NONE) {
2442 return ret;
2443 }
2444
2445 if (env_archcpu(env)->cfg.ext_zicfilp) {
2446 mask |= SENVCFG_LPE;
2447 }
2448
2449 /* SSE at a higher privilege mode must be ON for SSE at the next lower mode to be ON */
2450 if (env_archcpu(env)->cfg.ext_zicfiss &&
2451 get_field(env->menvcfg, MENVCFG_SSE) &&
2452 (env->virt_enabled ? get_field(env->henvcfg, HENVCFG_SSE) : true)) {
2453 mask |= SENVCFG_SSE;
2454 }
2455
2456 env->senvcfg = (env->senvcfg & ~mask) | (val & mask);
2457 return RISCV_EXCP_NONE;
2458 }
2459
read_henvcfg(CPURISCVState * env,int csrno,target_ulong * val)2460 static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
2461 target_ulong *val)
2462 {
2463 RISCVException ret;
2464
2465 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2466 if (ret != RISCV_EXCP_NONE) {
2467 return ret;
2468 }
2469
2470 /*
2471 * henvcfg.pbmte is read-only 0 when menvcfg.pbmte = 0
2472 * henvcfg.stce is read-only 0 when menvcfg.stce = 0
2473 * henvcfg.adue is read-only 0 when menvcfg.adue = 0
2474 */
2475 *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
2476 env->menvcfg);
2477 return RISCV_EXCP_NONE;
2478 }
2479
write_henvcfg(CPURISCVState * env,int csrno,target_ulong val)2480 static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
2481 target_ulong val)
2482 {
2483 uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;
2484 RISCVException ret;
2485
2486 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2487 if (ret != RISCV_EXCP_NONE) {
2488 return ret;
2489 }
2490
2491 if (riscv_cpu_mxl(env) == MXL_RV64) {
2492 mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE);
2493
2494 if (env_archcpu(env)->cfg.ext_zicfilp) {
2495 mask |= HENVCFG_LPE;
2496 }
2497
2498 /* H can light up SSE for VS only if HS had it from menvcfg */
2499 if (env_archcpu(env)->cfg.ext_zicfiss &&
2500 get_field(env->menvcfg, MENVCFG_SSE)) {
2501 mask |= HENVCFG_SSE;
2502 }
2503 }
2504
2505 env->henvcfg = (env->henvcfg & ~mask) | (val & mask);
2506
2507 return RISCV_EXCP_NONE;
2508 }
2509
read_henvcfgh(CPURISCVState * env,int csrno,target_ulong * val)2510 static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
2511 target_ulong *val)
2512 {
2513 RISCVException ret;
2514
2515 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2516 if (ret != RISCV_EXCP_NONE) {
2517 return ret;
2518 }
2519
2520 *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
2521 env->menvcfg)) >> 32;
2522 return RISCV_EXCP_NONE;
2523 }
2524
write_henvcfgh(CPURISCVState * env,int csrno,target_ulong val)2525 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
2526 target_ulong val)
2527 {
2528 uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE |
2529 HENVCFG_ADUE);
2530 uint64_t valh = (uint64_t)val << 32;
2531 RISCVException ret;
2532
2533 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2534 if (ret != RISCV_EXCP_NONE) {
2535 return ret;
2536 }
2537
2538 env->henvcfg = (env->henvcfg & ~mask) | (valh & mask);
2539 return RISCV_EXCP_NONE;
2540 }
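
/*
 * Note on the gating above (descriptive, not new behaviour): the
 * PBMTE/STCE/ADUE bits of henvcfg are only writable while the matching
 * menvcfg bit is set, and reads AND them with menvcfg as well, so clearing
 * a feature at M-level immediately turns the corresponding henvcfg bit into
 * read-only zero for the hypervisor.
 */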
2541
read_mstateen(CPURISCVState * env,int csrno,target_ulong * val)2542 static RISCVException read_mstateen(CPURISCVState *env, int csrno,
2543 target_ulong *val)
2544 {
2545 *val = env->mstateen[csrno - CSR_MSTATEEN0];
2546
2547 return RISCV_EXCP_NONE;
2548 }
2549
write_mstateen(CPURISCVState * env,int csrno,uint64_t wr_mask,target_ulong new_val)2550 static RISCVException write_mstateen(CPURISCVState *env, int csrno,
2551 uint64_t wr_mask, target_ulong new_val)
2552 {
2553 uint64_t *reg;
2554
2555 reg = &env->mstateen[csrno - CSR_MSTATEEN0];
2556 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2557
2558 return RISCV_EXCP_NONE;
2559 }
2560
write_mstateen0(CPURISCVState * env,int csrno,target_ulong new_val)2561 static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
2562 target_ulong new_val)
2563 {
2564 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2565 if (!riscv_has_ext(env, RVF)) {
2566 wr_mask |= SMSTATEEN0_FCSR;
2567 }
2568
2569 if (env->priv_ver >= PRIV_VERSION_1_13_0) {
2570 wr_mask |= SMSTATEEN0_P1P13;
2571 }
2572
2573 return write_mstateen(env, csrno, wr_mask, new_val);
2574 }
2575
write_mstateen_1_3(CPURISCVState * env,int csrno,target_ulong new_val)2576 static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno,
2577 target_ulong new_val)
2578 {
2579 return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2580 }
2581
read_mstateenh(CPURISCVState * env,int csrno,target_ulong * val)2582 static RISCVException read_mstateenh(CPURISCVState *env, int csrno,
2583 target_ulong *val)
2584 {
2585 *val = env->mstateen[csrno - CSR_MSTATEEN0H] >> 32;
2586
2587 return RISCV_EXCP_NONE;
2588 }
2589
write_mstateenh(CPURISCVState * env,int csrno,uint64_t wr_mask,target_ulong new_val)2590 static RISCVException write_mstateenh(CPURISCVState *env, int csrno,
2591 uint64_t wr_mask, target_ulong new_val)
2592 {
2593 uint64_t *reg, val;
2594
2595 reg = &env->mstateen[csrno - CSR_MSTATEEN0H];
2596 val = (uint64_t)new_val << 32;
2597 val |= *reg & 0xFFFFFFFF;
2598 *reg = (*reg & ~wr_mask) | (val & wr_mask);
2599
2600 return RISCV_EXCP_NONE;
2601 }
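
/*
 * Note on the update above (illustrative): unlike the rmw_*h helpers, the
 * *stateen*h writers first rebuild the full 64-bit value --
 *
 *   val = ((uint64_t)new_val << 32) | (*reg & 0xFFFFFFFF);
 *
 * -- and only then apply the masked update, so a write to the high-half CSR
 * can never disturb bits 31:0 even if wr_mask happened to cover them.
 */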
2602
write_mstateen0h(CPURISCVState * env,int csrno,target_ulong new_val)2603 static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
2604 target_ulong new_val)
2605 {
2606 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2607
2608 if (env->priv_ver >= PRIV_VERSION_1_13_0) {
2609 wr_mask |= SMSTATEEN0_P1P13;
2610 }
2611
2612 return write_mstateenh(env, csrno, wr_mask, new_val);
2613 }
2614
write_mstateenh_1_3(CPURISCVState * env,int csrno,target_ulong new_val)2615 static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno,
2616 target_ulong new_val)
2617 {
2618 return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
2619 }
2620
read_hstateen(CPURISCVState * env,int csrno,target_ulong * val)2621 static RISCVException read_hstateen(CPURISCVState *env, int csrno,
2622 target_ulong *val)
2623 {
2624 int index = csrno - CSR_HSTATEEN0;
2625
2626 *val = env->hstateen[index] & env->mstateen[index];
2627
2628 return RISCV_EXCP_NONE;
2629 }
2630
write_hstateen(CPURISCVState * env,int csrno,uint64_t mask,target_ulong new_val)2631 static RISCVException write_hstateen(CPURISCVState *env, int csrno,
2632 uint64_t mask, target_ulong new_val)
2633 {
2634 int index = csrno - CSR_HSTATEEN0;
2635 uint64_t *reg, wr_mask;
2636
2637 reg = &env->hstateen[index];
2638 wr_mask = env->mstateen[index] & mask;
2639 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2640
2641 return RISCV_EXCP_NONE;
2642 }
2643
write_hstateen0(CPURISCVState * env,int csrno,target_ulong new_val)2644 static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
2645 target_ulong new_val)
2646 {
2647 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2648
2649 if (!riscv_has_ext(env, RVF)) {
2650 wr_mask |= SMSTATEEN0_FCSR;
2651 }
2652
2653 return write_hstateen(env, csrno, wr_mask, new_val);
2654 }
2655
write_hstateen_1_3(CPURISCVState * env,int csrno,target_ulong new_val)2656 static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno,
2657 target_ulong new_val)
2658 {
2659 return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2660 }
2661
read_hstateenh(CPURISCVState * env,int csrno,target_ulong * val)2662 static RISCVException read_hstateenh(CPURISCVState *env, int csrno,
2663 target_ulong *val)
2664 {
2665 int index = csrno - CSR_HSTATEEN0H;
2666
2667 *val = (env->hstateen[index] >> 32) & (env->mstateen[index] >> 32);
2668
2669 return RISCV_EXCP_NONE;
2670 }
2671
write_hstateenh(CPURISCVState * env,int csrno,uint64_t mask,target_ulong new_val)2672 static RISCVException write_hstateenh(CPURISCVState *env, int csrno,
2673 uint64_t mask, target_ulong new_val)
2674 {
2675 int index = csrno - CSR_HSTATEEN0H;
2676 uint64_t *reg, wr_mask, val;
2677
2678 reg = &env->hstateen[index];
2679 val = (uint64_t)new_val << 32;
2680 val |= *reg & 0xFFFFFFFF;
2681 wr_mask = env->mstateen[index] & mask;
2682 *reg = (*reg & ~wr_mask) | (val & wr_mask);
2683
2684 return RISCV_EXCP_NONE;
2685 }
2686
write_hstateen0h(CPURISCVState * env,int csrno,target_ulong new_val)2687 static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
2688 target_ulong new_val)
2689 {
2690 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2691
2692 return write_hstateenh(env, csrno, wr_mask, new_val);
2693 }
2694
write_hstateenh_1_3(CPURISCVState * env,int csrno,target_ulong new_val)2695 static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno,
2696 target_ulong new_val)
2697 {
2698 return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
2699 }
2700
read_sstateen(CPURISCVState * env,int csrno,target_ulong * val)2701 static RISCVException read_sstateen(CPURISCVState *env, int csrno,
2702 target_ulong *val)
2703 {
2704 bool virt = env->virt_enabled;
2705 int index = csrno - CSR_SSTATEEN0;
2706
2707 *val = env->sstateen[index] & env->mstateen[index];
2708 if (virt) {
2709 *val &= env->hstateen[index];
2710 }
2711
2712 return RISCV_EXCP_NONE;
2713 }
2714
write_sstateen(CPURISCVState * env,int csrno,uint64_t mask,target_ulong new_val)2715 static RISCVException write_sstateen(CPURISCVState *env, int csrno,
2716 uint64_t mask, target_ulong new_val)
2717 {
2718 bool virt = env->virt_enabled;
2719 int index = csrno - CSR_SSTATEEN0;
2720 uint64_t wr_mask;
2721 uint64_t *reg;
2722
2723 wr_mask = env->mstateen[index] & mask;
2724 if (virt) {
2725 wr_mask &= env->hstateen[index];
2726 }
2727
2728 reg = &env->sstateen[index];
2729 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2730
2731 return RISCV_EXCP_NONE;
2732 }
2733
write_sstateen0(CPURISCVState * env,int csrno,target_ulong new_val)2734 static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
2735 target_ulong new_val)
2736 {
2737 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2738
2739 if (!riscv_has_ext(env, RVF)) {
2740 wr_mask |= SMSTATEEN0_FCSR;
2741 }
2742
2743 return write_sstateen(env, csrno, wr_mask, new_val);
2744 }
2745
write_sstateen_1_3(CPURISCVState * env,int csrno,target_ulong new_val)2746 static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno,
2747 target_ulong new_val)
2748 {
2749 return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2750 }
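
/*
 * Summary of the stateen hierarchy implemented above (descriptive only):
 * mstateen gates hstateen and sstateen, and hstateen additionally gates
 * sstateen when V=1, both for reads (the values are ANDed together) and
 * for writes (the effective write mask is ANDed with the higher-level
 * register). A bit that a higher privilege level has not enabled is
 * therefore read-only zero at the lower level.
 */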
2751
rmw_mip64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)2752 static RISCVException rmw_mip64(CPURISCVState *env, int csrno,
2753 uint64_t *ret_val,
2754 uint64_t new_val, uint64_t wr_mask)
2755 {
2756 uint64_t old_mip, mask = wr_mask & delegable_ints;
2757 uint32_t gin;
2758
2759 if (mask & MIP_SEIP) {
2760 env->software_seip = new_val & MIP_SEIP;
2761 new_val |= env->external_seip * MIP_SEIP;
2762 }
2763
2764 if (riscv_cpu_cfg(env)->ext_sstc && (env->priv == PRV_M) &&
2765 get_field(env->menvcfg, MENVCFG_STCE)) {
2766 /* The sstc extension forbids STIP and VSTIP from being writable in mip */
2767 mask = mask & ~(MIP_STIP | MIP_VSTIP);
2768 }
2769
2770 if (mask) {
2771 old_mip = riscv_cpu_update_mip(env, mask, (new_val & mask));
2772 } else {
2773 old_mip = env->mip;
2774 }
2775
2776 if (csrno != CSR_HVIP) {
2777 gin = get_field(env->hstatus, HSTATUS_VGEIN);
2778 old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0;
2779 old_mip |= env->vstime_irq ? MIP_VSTIP : 0;
2780 }
2781
2782 if (ret_val) {
2783 *ret_val = old_mip;
2784 }
2785
2786 return RISCV_EXCP_NONE;
2787 }
2788
rmw_mip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2789 static RISCVException rmw_mip(CPURISCVState *env, int csrno,
2790 target_ulong *ret_val,
2791 target_ulong new_val, target_ulong wr_mask)
2792 {
2793 uint64_t rval;
2794 RISCVException ret;
2795
2796 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask);
2797 if (ret_val) {
2798 *ret_val = rval;
2799 }
2800
2801 return ret;
2802 }
2803
rmw_miph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2804 static RISCVException rmw_miph(CPURISCVState *env, int csrno,
2805 target_ulong *ret_val,
2806 target_ulong new_val, target_ulong wr_mask)
2807 {
2808 uint64_t rval;
2809 RISCVException ret;
2810
2811 ret = rmw_mip64(env, csrno, &rval,
2812 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2813 if (ret_val) {
2814 *ret_val = rval >> 32;
2815 }
2816
2817 return ret;
2818 }
2819
2820 /*
2821 * The function is written for two use-cases:
2822 * 1- To access mvip csr as is for m-mode access.
2823 * 2- To access sip as a combination of mip and mvip for s-mode.
2824 *
2825 * Both report bits 1, 5, 9 and 13:63, with the exception that
2826 * STIP is read-only zero in mvip when the sstc extension
2827 * is present.
2828 * Also, sip needs to be read-only zero when both mideleg[i] and
2829 * mvien[i] are zero, whereas mvip remains an alias of mip.
2830 */
rmw_mvip64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)2831 static RISCVException rmw_mvip64(CPURISCVState *env, int csrno,
2832 uint64_t *ret_val,
2833 uint64_t new_val, uint64_t wr_mask)
2834 {
2835 RISCVCPU *cpu = env_archcpu(env);
2836 target_ulong ret_mip = 0;
2837 RISCVException ret;
2838 uint64_t old_mvip;
2839
2840 /*
2841 * mideleg[i] mvien[i]
2842 * 0 0 No delegation. mvip[i] is alias of mip[i].
2843 * 0 1 mvip[i] becomes source of interrupt, mip bypassed.
2844 * 1 X mip[i] is source of interrupt and mvip[i] aliases
2845 * mip[i].
2846 *
2847 * So alias condition would be for bits:
2848 * ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (mideleg | ~mvien)) |
2849 * (!sstc & MIP_STIP)
2850 *
2851 * Non-alias condition will be for bits:
2852 * (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (~mideleg & mvien)
2853 *
2854 * alias_mask denotes the bits that come from mip; nalias_mask denotes bits
2855 * that come from mvip.
2856 */
2857 uint64_t alias_mask = ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
2858 (env->mideleg | ~env->mvien)) | MIP_STIP;
2859 uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
2860 (~env->mideleg & env->mvien);
2861 uint64_t wr_mask_mvip;
2862 uint64_t wr_mask_mip;
2863
2864 /*
2865 * mideleg[i] mvien[i]
2866 * 0 0 sip[i] read-only zero.
2867 * 0 1 sip[i] alias of mvip[i].
2868 * 1 X sip[i] alias of mip[i].
2869 *
2870 * Both alias and non-alias mask remain the same for sip except for bits
2871 * that are zero in both mideleg and mvien.
2872 */
2873 if (csrno == CSR_SIP) {
2874 /* Remove bits that are zero in both mideleg and mvien. */
2875 alias_mask &= (env->mideleg | env->mvien);
2876 nalias_mask &= (env->mideleg | env->mvien);
2877 }
2878
2879 /*
2880 * If sstc is present, mvip.STIP is not an alias of mip.STIP, so clear
2881 * that bit in the value returned from mip.
2882 */
2883 if (cpu->cfg.ext_sstc && (env->priv == PRV_M) &&
2884 get_field(env->menvcfg, MENVCFG_STCE)) {
2885 alias_mask &= ~MIP_STIP;
2886 }
2887
2888 wr_mask_mip = wr_mask & alias_mask & mvip_writable_mask;
2889 wr_mask_mvip = wr_mask & nalias_mask & mvip_writable_mask;
2890
2891 /*
2892 * For bits set in alias_mask, mvip needs to be alias of mip, so forward
2893 * this to rmw_mip.
2894 */
2895 ret = rmw_mip(env, CSR_MIP, &ret_mip, new_val, wr_mask_mip);
2896 if (ret != RISCV_EXCP_NONE) {
2897 return ret;
2898 }
2899
2900 old_mvip = env->mvip;
2901
2902 /*
2903 * Write to mvip. Update only non-alias bits. Alias bits were updated
2904 * in mip in rmw_mip above.
2905 */
2906 if (wr_mask_mvip) {
2907 env->mvip = (env->mvip & ~wr_mask_mvip) | (new_val & wr_mask_mvip);
2908
2909 /*
2910 * Given mvip is separate source from mip, we need to trigger interrupt
2911 * from here separately. Normally this happen from riscv_cpu_update_mip.
2912 */
2913 riscv_cpu_interrupt(env);
2914 }
2915
2916 if (ret_val) {
2917 ret_mip &= alias_mask;
2918 old_mvip &= nalias_mask;
2919
2920 *ret_val = old_mvip | ret_mip;
2921 }
2922
2923 return RISCV_EXCP_NONE;
2924 }
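
/*
 * Illustrative example of the alias split above (hypothetical bit, not a
 * specific platform interrupt): take a local interrupt i in 13..63 with
 * mideleg[i] = 0 and mvien[i] = 1. The bit falls in nalias_mask, so the
 * guest write lands in env->mvip and riscv_cpu_interrupt() is invoked by
 * hand; with mideleg[i] = 1 the same bit would fall in alias_mask instead
 * and the update would be forwarded to mip through rmw_mip().
 */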
2925
rmw_mvip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2926 static RISCVException rmw_mvip(CPURISCVState *env, int csrno,
2927 target_ulong *ret_val,
2928 target_ulong new_val, target_ulong wr_mask)
2929 {
2930 uint64_t rval;
2931 RISCVException ret;
2932
2933 ret = rmw_mvip64(env, csrno, &rval, new_val, wr_mask);
2934 if (ret_val) {
2935 *ret_val = rval;
2936 }
2937
2938 return ret;
2939 }
2940
rmw_mviph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2941 static RISCVException rmw_mviph(CPURISCVState *env, int csrno,
2942 target_ulong *ret_val,
2943 target_ulong new_val, target_ulong wr_mask)
2944 {
2945 uint64_t rval;
2946 RISCVException ret;
2947
2948 ret = rmw_mvip64(env, csrno, &rval,
2949 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2950 if (ret_val) {
2951 *ret_val = rval >> 32;
2952 }
2953
2954 return ret;
2955 }
2956
2957 /* Supervisor Trap Setup */
read_sstatus_i128(CPURISCVState * env,int csrno,Int128 * val)2958 static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
2959 Int128 *val)
2960 {
2961 uint64_t mask = sstatus_v1_10_mask;
2962 uint64_t sstatus = env->mstatus & mask;
2963 if (env->xl != MXL_RV32 || env->debugger) {
2964 mask |= SSTATUS64_UXL;
2965 }
2966
2967 if (env_archcpu(env)->cfg.ext_zicfilp) {
2968 mask |= SSTATUS_SPELP;
2969 }
2970
2971 *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
2972 return RISCV_EXCP_NONE;
2973 }
2974
read_sstatus(CPURISCVState * env,int csrno,target_ulong * val)2975 static RISCVException read_sstatus(CPURISCVState *env, int csrno,
2976 target_ulong *val)
2977 {
2978 target_ulong mask = (sstatus_v1_10_mask);
2979 if (env->xl != MXL_RV32 || env->debugger) {
2980 mask |= SSTATUS64_UXL;
2981 }
2982
2983 if (env_archcpu(env)->cfg.ext_zicfilp) {
2984 mask |= SSTATUS_SPELP;
2985 }
2986
2987 /* TODO: Use SXL not MXL. */
2988 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
2989 return RISCV_EXCP_NONE;
2990 }
2991
write_sstatus(CPURISCVState * env,int csrno,target_ulong val)2992 static RISCVException write_sstatus(CPURISCVState *env, int csrno,
2993 target_ulong val)
2994 {
2995 target_ulong mask = (sstatus_v1_10_mask);
2996
2997 if (env->xl != MXL_RV32 || env->debugger) {
2998 if ((val & SSTATUS64_UXL) != 0) {
2999 mask |= SSTATUS64_UXL;
3000 }
3001 }
3002
3003 if (env_archcpu(env)->cfg.ext_zicfilp) {
3004 mask |= SSTATUS_SPELP;
3005 }
3006
3007 target_ulong newval = (env->mstatus & ~mask) | (val & mask);
3008 return write_mstatus(env, CSR_MSTATUS, newval);
3009 }
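
/*
 * Note on the UXL handling above (descriptive): SSTATUS64_UXL is added to
 * the write mask only when the incoming value has a non-zero UXL field, so
 * a write with UXL == 0 behaves as WARL "keep the previous value" rather
 * than selecting an invalid user XLEN.
 */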
3010
rmw_vsie64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)3011 static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
3012 uint64_t *ret_val,
3013 uint64_t new_val, uint64_t wr_mask)
3014 {
3015 uint64_t alias_mask = (LOCAL_INTERRUPTS | VS_MODE_INTERRUPTS) &
3016 env->hideleg;
3017 uint64_t nalias_mask = LOCAL_INTERRUPTS & (~env->hideleg & env->hvien);
3018 uint64_t rval, rval_vs, vsbits;
3019 uint64_t wr_mask_vsie;
3020 uint64_t wr_mask_mie;
3021 RISCVException ret;
3022
3023 /* Bring VS-level bits to correct position */
3024 vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
3025 new_val &= ~(VS_MODE_INTERRUPTS >> 1);
3026 new_val |= vsbits << 1;
3027
3028 vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
3029 wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
3030 wr_mask |= vsbits << 1;
3031
3032 wr_mask_mie = wr_mask & alias_mask;
3033 wr_mask_vsie = wr_mask & nalias_mask;
3034
3035 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask_mie);
3036
3037 rval_vs = env->vsie & nalias_mask;
3038 env->vsie = (env->vsie & ~wr_mask_vsie) | (new_val & wr_mask_vsie);
3039
3040 if (ret_val) {
3041 rval &= alias_mask;
3042 vsbits = rval & VS_MODE_INTERRUPTS;
3043 rval &= ~VS_MODE_INTERRUPTS;
3044 *ret_val = rval | (vsbits >> 1) | rval_vs;
3045 }
3046
3047 return ret;
3048 }
3049
rmw_vsie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3050 static RISCVException rmw_vsie(CPURISCVState *env, int csrno,
3051 target_ulong *ret_val,
3052 target_ulong new_val, target_ulong wr_mask)
3053 {
3054 uint64_t rval;
3055 RISCVException ret;
3056
3057 ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask);
3058 if (ret_val) {
3059 *ret_val = rval;
3060 }
3061
3062 return ret;
3063 }
3064
rmw_vsieh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3065 static RISCVException rmw_vsieh(CPURISCVState *env, int csrno,
3066 target_ulong *ret_val,
3067 target_ulong new_val, target_ulong wr_mask)
3068 {
3069 uint64_t rval;
3070 RISCVException ret;
3071
3072 ret = rmw_vsie64(env, csrno, &rval,
3073 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3074 if (ret_val) {
3075 *ret_val = rval >> 32;
3076 }
3077
3078 return ret;
3079 }
3080
rmw_sie64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)3081 static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
3082 uint64_t *ret_val,
3083 uint64_t new_val, uint64_t wr_mask)
3084 {
3085 uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
3086 (~env->mideleg & env->mvien);
3087 uint64_t alias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & env->mideleg;
3088 uint64_t sie_mask = wr_mask & nalias_mask;
3089 RISCVException ret;
3090
3091 /*
3092 * mideleg[i] mvien[i]
3093 * 0 0 sie[i] read-only zero.
3094 * 0 1 sie[i] is a separate writable bit.
3095 * 1 X sie[i] alias of mie[i].
3096 *
3097 * Both alias and non-alias mask remain the same for sie except for bits
3098 * that are zero in both mideleg and mvien.
3099 */
3100 if (env->virt_enabled) {
3101 if (env->hvictl & HVICTL_VTI) {
3102 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
3103 }
3104 ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
3105 if (ret_val) {
3106 *ret_val &= alias_mask;
3107 }
3108 } else {
3109 ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & alias_mask);
3110 if (ret_val) {
3111 *ret_val &= alias_mask;
3112 *ret_val |= env->sie & nalias_mask;
3113 }
3114
3115 env->sie = (env->sie & ~sie_mask) | (new_val & sie_mask);
3116 }
3117
3118 return ret;
3119 }
3120
rmw_sie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3121 static RISCVException rmw_sie(CPURISCVState *env, int csrno,
3122 target_ulong *ret_val,
3123 target_ulong new_val, target_ulong wr_mask)
3124 {
3125 uint64_t rval;
3126 RISCVException ret;
3127
3128 ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask);
3129 if (ret == RISCV_EXCP_NONE && ret_val) {
3130 *ret_val = rval;
3131 }
3132
3133 return ret;
3134 }
3135
rmw_sieh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3136 static RISCVException rmw_sieh(CPURISCVState *env, int csrno,
3137 target_ulong *ret_val,
3138 target_ulong new_val, target_ulong wr_mask)
3139 {
3140 uint64_t rval;
3141 RISCVException ret;
3142
3143 ret = rmw_sie64(env, csrno, &rval,
3144 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3145 if (ret_val) {
3146 *ret_val = rval >> 32;
3147 }
3148
3149 return ret;
3150 }
3151
read_stvec(CPURISCVState * env,int csrno,target_ulong * val)3152 static RISCVException read_stvec(CPURISCVState *env, int csrno,
3153 target_ulong *val)
3154 {
3155 *val = env->stvec;
3156 return RISCV_EXCP_NONE;
3157 }
3158
write_stvec(CPURISCVState * env,int csrno,target_ulong val)3159 static RISCVException write_stvec(CPURISCVState *env, int csrno,
3160 target_ulong val)
3161 {
3162 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
3163 if ((val & 3) < 2) {
3164 env->stvec = val;
3165 } else {
3166 qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n");
3167 }
3168 return RISCV_EXCP_NONE;
3169 }
3170
read_scounteren(CPURISCVState * env,int csrno,target_ulong * val)3171 static RISCVException read_scounteren(CPURISCVState *env, int csrno,
3172 target_ulong *val)
3173 {
3174 *val = env->scounteren;
3175 return RISCV_EXCP_NONE;
3176 }
3177
write_scounteren(CPURISCVState * env,int csrno,target_ulong val)3178 static RISCVException write_scounteren(CPURISCVState *env, int csrno,
3179 target_ulong val)
3180 {
3181 RISCVCPU *cpu = env_archcpu(env);
3182
3183 /* WARL register - disable unavailable counters */
3184 env->scounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
3185 COUNTEREN_IR);
3186 return RISCV_EXCP_NONE;
3187 }
3188
3189 /* Supervisor Trap Handling */
read_sscratch_i128(CPURISCVState * env,int csrno,Int128 * val)3190 static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
3191 Int128 *val)
3192 {
3193 *val = int128_make128(env->sscratch, env->sscratchh);
3194 return RISCV_EXCP_NONE;
3195 }
3196
write_sscratch_i128(CPURISCVState * env,int csrno,Int128 val)3197 static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
3198 Int128 val)
3199 {
3200 env->sscratch = int128_getlo(val);
3201 env->sscratchh = int128_gethi(val);
3202 return RISCV_EXCP_NONE;
3203 }
3204
read_sscratch(CPURISCVState * env,int csrno,target_ulong * val)3205 static RISCVException read_sscratch(CPURISCVState *env, int csrno,
3206 target_ulong *val)
3207 {
3208 *val = env->sscratch;
3209 return RISCV_EXCP_NONE;
3210 }
3211
write_sscratch(CPURISCVState * env,int csrno,target_ulong val)3212 static RISCVException write_sscratch(CPURISCVState *env, int csrno,
3213 target_ulong val)
3214 {
3215 env->sscratch = val;
3216 return RISCV_EXCP_NONE;
3217 }
3218
read_sepc(CPURISCVState * env,int csrno,target_ulong * val)3219 static RISCVException read_sepc(CPURISCVState *env, int csrno,
3220 target_ulong *val)
3221 {
3222 *val = env->sepc;
3223 return RISCV_EXCP_NONE;
3224 }
3225
write_sepc(CPURISCVState * env,int csrno,target_ulong val)3226 static RISCVException write_sepc(CPURISCVState *env, int csrno,
3227 target_ulong val)
3228 {
3229 env->sepc = val;
3230 return RISCV_EXCP_NONE;
3231 }
3232
read_scause(CPURISCVState * env,int csrno,target_ulong * val)3233 static RISCVException read_scause(CPURISCVState *env, int csrno,
3234 target_ulong *val)
3235 {
3236 *val = env->scause;
3237 return RISCV_EXCP_NONE;
3238 }
3239
write_scause(CPURISCVState * env,int csrno,target_ulong val)3240 static RISCVException write_scause(CPURISCVState *env, int csrno,
3241 target_ulong val)
3242 {
3243 env->scause = val;
3244 return RISCV_EXCP_NONE;
3245 }
3246
read_stval(CPURISCVState * env,int csrno,target_ulong * val)3247 static RISCVException read_stval(CPURISCVState *env, int csrno,
3248 target_ulong *val)
3249 {
3250 *val = env->stval;
3251 return RISCV_EXCP_NONE;
3252 }
3253
write_stval(CPURISCVState * env,int csrno,target_ulong val)3254 static RISCVException write_stval(CPURISCVState *env, int csrno,
3255 target_ulong val)
3256 {
3257 env->stval = val;
3258 return RISCV_EXCP_NONE;
3259 }
3260
3261 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
3262 uint64_t *ret_val,
3263 uint64_t new_val, uint64_t wr_mask);
3264
rmw_vsip64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)3265 static RISCVException rmw_vsip64(CPURISCVState *env, int csrno,
3266 uint64_t *ret_val,
3267 uint64_t new_val, uint64_t wr_mask)
3268 {
3269 RISCVException ret;
3270 uint64_t rval, mask = env->hideleg & VS_MODE_INTERRUPTS;
3271 uint64_t vsbits;
3272
3273 /* Add virtualized bits into vsip mask. */
3274 mask |= env->hvien & ~env->hideleg;
3275
3276 /* Bring VS-level bits to correct position */
3277 vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
3278 new_val &= ~(VS_MODE_INTERRUPTS >> 1);
3279 new_val |= vsbits << 1;
3280 vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
3281 wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
3282 wr_mask |= vsbits << 1;
3283
3284 ret = rmw_hvip64(env, csrno, &rval, new_val,
3285 wr_mask & mask & vsip_writable_mask);
3286 if (ret_val) {
3287 rval &= mask;
3288 vsbits = rval & VS_MODE_INTERRUPTS;
3289 rval &= ~VS_MODE_INTERRUPTS;
3290 *ret_val = rval | (vsbits >> 1);
3291 }
3292
3293 return ret;
3294 }
3295
rmw_vsip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3296 static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
3297 target_ulong *ret_val,
3298 target_ulong new_val, target_ulong wr_mask)
3299 {
3300 uint64_t rval;
3301 RISCVException ret;
3302
3303 ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask);
3304 if (ret_val) {
3305 *ret_val = rval;
3306 }
3307
3308 return ret;
3309 }
3310
rmw_vsiph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3311 static RISCVException rmw_vsiph(CPURISCVState *env, int csrno,
3312 target_ulong *ret_val,
3313 target_ulong new_val, target_ulong wr_mask)
3314 {
3315 uint64_t rval;
3316 RISCVException ret;
3317
3318 ret = rmw_vsip64(env, csrno, &rval,
3319 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3320 if (ret_val) {
3321 *ret_val = rval >> 32;
3322 }
3323
3324 return ret;
3325 }
3326
rmw_sip64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)3327 static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
3328 uint64_t *ret_val,
3329 uint64_t new_val, uint64_t wr_mask)
3330 {
3331 RISCVException ret;
3332 uint64_t mask = (env->mideleg | env->mvien) & sip_writable_mask;
3333
3334 if (env->virt_enabled) {
3335 if (env->hvictl & HVICTL_VTI) {
3336 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
3337 }
3338 ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
3339 } else {
3340 ret = rmw_mvip64(env, csrno, ret_val, new_val, wr_mask & mask);
3341 }
3342
3343 if (ret_val) {
3344 *ret_val &= (env->mideleg | env->mvien) &
3345 (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS);
3346 }
3347
3348 return ret;
3349 }
3350
rmw_sip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3351 static RISCVException rmw_sip(CPURISCVState *env, int csrno,
3352 target_ulong *ret_val,
3353 target_ulong new_val, target_ulong wr_mask)
3354 {
3355 uint64_t rval;
3356 RISCVException ret;
3357
3358 ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask);
3359 if (ret_val) {
3360 *ret_val = rval;
3361 }
3362
3363 return ret;
3364 }
3365
rmw_siph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3366 static RISCVException rmw_siph(CPURISCVState *env, int csrno,
3367 target_ulong *ret_val,
3368 target_ulong new_val, target_ulong wr_mask)
3369 {
3370 uint64_t rval;
3371 RISCVException ret;
3372
3373 ret = rmw_sip64(env, csrno, &rval,
3374 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3375 if (ret_val) {
3376 *ret_val = rval >> 32;
3377 }
3378
3379 return ret;
3380 }
3381
3382 /* Supervisor Protection and Translation */
read_satp(CPURISCVState * env,int csrno,target_ulong * val)3383 static RISCVException read_satp(CPURISCVState *env, int csrno,
3384 target_ulong *val)
3385 {
3386 if (!riscv_cpu_cfg(env)->mmu) {
3387 *val = 0;
3388 return RISCV_EXCP_NONE;
3389 }
3390 *val = env->satp;
3391 return RISCV_EXCP_NONE;
3392 }
3393
write_satp(CPURISCVState * env,int csrno,target_ulong val)3394 static RISCVException write_satp(CPURISCVState *env, int csrno,
3395 target_ulong val)
3396 {
3397 if (!riscv_cpu_cfg(env)->mmu) {
3398 return RISCV_EXCP_NONE;
3399 }
3400
3401 env->satp = legalize_xatp(env, env->satp, val);
3402 return RISCV_EXCP_NONE;
3403 }
3404
read_vstopi(CPURISCVState * env,int csrno,target_ulong * val)3405 static RISCVException read_vstopi(CPURISCVState *env, int csrno,
3406 target_ulong *val)
3407 {
3408 int irq, ret;
3409 target_ulong topei;
3410 uint64_t vseip, vsgein;
3411 uint32_t iid, iprio, hviid, hviprio, gein;
3412 uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS];
3413
3414 gein = get_field(env->hstatus, HSTATUS_VGEIN);
3415 hviid = get_field(env->hvictl, HVICTL_IID);
3416 hviprio = get_field(env->hvictl, HVICTL_IPRIO);
3417
3418 if (gein) {
3419 vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
3420 vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP;
3421 if (gein <= env->geilen && vseip) {
3422 siid[scount] = IRQ_S_EXT;
3423 siprio[scount] = IPRIO_MMAXIPRIO + 1;
3424 if (env->aia_ireg_rmw_fn[PRV_S]) {
3425 /*
3426 * Call machine specific IMSIC register emulation for
3427 * reading TOPEI.
3428 */
3429 ret = env->aia_ireg_rmw_fn[PRV_S](
3430 env->aia_ireg_rmw_fn_arg[PRV_S],
3431 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein,
3432 riscv_cpu_mxl_bits(env)),
3433 &topei, 0, 0);
3434 if (!ret && topei) {
3435 siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK;
3436 }
3437 }
3438 scount++;
3439 }
3440 } else {
3441 if (hviid == IRQ_S_EXT && hviprio) {
3442 siid[scount] = IRQ_S_EXT;
3443 siprio[scount] = hviprio;
3444 scount++;
3445 }
3446 }
3447
3448 if (env->hvictl & HVICTL_VTI) {
3449 if (hviid != IRQ_S_EXT) {
3450 siid[scount] = hviid;
3451 siprio[scount] = hviprio;
3452 scount++;
3453 }
3454 } else {
3455 irq = riscv_cpu_vsirq_pending(env);
3456 if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) {
3457 siid[scount] = irq;
3458 siprio[scount] = env->hviprio[irq];
3459 scount++;
3460 }
3461 }
3462
3463 iid = 0;
3464 iprio = UINT_MAX;
3465 for (s = 0; s < scount; s++) {
3466 if (siprio[s] < iprio) {
3467 iid = siid[s];
3468 iprio = siprio[s];
3469 }
3470 }
3471
3472 if (iid) {
3473 if (env->hvictl & HVICTL_IPRIOM) {
3474 if (iprio > IPRIO_MMAXIPRIO) {
3475 iprio = IPRIO_MMAXIPRIO;
3476 }
3477 if (!iprio) {
3478 if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) {
3479 iprio = IPRIO_MMAXIPRIO;
3480 }
3481 }
3482 } else {
3483 iprio = 1;
3484 }
3485 } else {
3486 iprio = 0;
3487 }
3488
3489 *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT;
3490 *val |= iprio;
3491
3492 return RISCV_EXCP_NONE;
3493 }
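
/*
 * Sketch of the selection above (descriptive summary): at most two
 * candidates are gathered -- an S-level external interrupt, taken either
 * from the selected guest interrupt file (vgein) or synthesised from
 * hvictl.IID/IPRIO, plus one other pending VS interrupt -- and the
 * candidate with the numerically smallest priority wins. When
 * HVICTL_IPRIOM is clear the reported priority is forced to 1.
 */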
3494
read_stopi(CPURISCVState * env,int csrno,target_ulong * val)3495 static RISCVException read_stopi(CPURISCVState *env, int csrno,
3496 target_ulong *val)
3497 {
3498 int irq;
3499 uint8_t iprio;
3500
3501 if (env->virt_enabled) {
3502 return read_vstopi(env, CSR_VSTOPI, val);
3503 }
3504
3505 irq = riscv_cpu_sirq_pending(env);
3506 if (irq <= 0 || irq > 63) {
3507 *val = 0;
3508 } else {
3509 iprio = env->siprio[irq];
3510 if (!iprio) {
3511 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) {
3512 iprio = IPRIO_MMAXIPRIO;
3513 }
3514 }
3515 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
3516 *val |= iprio;
3517 }
3518
3519 return RISCV_EXCP_NONE;
3520 }
3521
3522 /* Hypervisor Extensions */
read_hstatus(CPURISCVState * env,int csrno,target_ulong * val)3523 static RISCVException read_hstatus(CPURISCVState *env, int csrno,
3524 target_ulong *val)
3525 {
3526 *val = env->hstatus;
3527 if (riscv_cpu_mxl(env) != MXL_RV32) {
3528 /* We only support 64-bit VSXL */
3529 *val = set_field(*val, HSTATUS_VSXL, 2);
3530 }
3531 /* We only support little endian */
3532 *val = set_field(*val, HSTATUS_VSBE, 0);
3533 return RISCV_EXCP_NONE;
3534 }
3535
write_hstatus(CPURISCVState * env,int csrno,target_ulong val)3536 static RISCVException write_hstatus(CPURISCVState *env, int csrno,
3537 target_ulong val)
3538 {
3539 env->hstatus = val;
3540 if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
3541 qemu_log_mask(LOG_UNIMP,
3542 "QEMU does not support mixed HSXLEN options.");
3543 }
3544 if (get_field(val, HSTATUS_VSBE) != 0) {
3545 qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests.");
3546 }
3547 return RISCV_EXCP_NONE;
3548 }
3549
read_hedeleg(CPURISCVState * env,int csrno,target_ulong * val)3550 static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
3551 target_ulong *val)
3552 {
3553 *val = env->hedeleg;
3554 return RISCV_EXCP_NONE;
3555 }
3556
write_hedeleg(CPURISCVState * env,int csrno,target_ulong val)3557 static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
3558 target_ulong val)
3559 {
3560 env->hedeleg = val & vs_delegable_excps;
3561 return RISCV_EXCP_NONE;
3562 }
3563
read_hedelegh(CPURISCVState * env,int csrno,target_ulong * val)3564 static RISCVException read_hedelegh(CPURISCVState *env, int csrno,
3565 target_ulong *val)
3566 {
3567 RISCVException ret;
3568 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
3569 if (ret != RISCV_EXCP_NONE) {
3570 return ret;
3571 }
3572
3573 /* Reserved; reads as zero for now */
3574 *val = 0;
3575 return RISCV_EXCP_NONE;
3576 }
3577
write_hedelegh(CPURISCVState * env,int csrno,target_ulong val)3578 static RISCVException write_hedelegh(CPURISCVState *env, int csrno,
3579 target_ulong val)
3580 {
3581 RISCVException ret;
3582 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
3583 if (ret != RISCV_EXCP_NONE) {
3584 return ret;
3585 }
3586
3587 /* Reserved; writes are ignored for now */
3588 return RISCV_EXCP_NONE;
3589 }
3590
rmw_hvien64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)3591 static RISCVException rmw_hvien64(CPURISCVState *env, int csrno,
3592 uint64_t *ret_val,
3593 uint64_t new_val, uint64_t wr_mask)
3594 {
3595 uint64_t mask = wr_mask & hvien_writable_mask;
3596
3597 if (ret_val) {
3598 *ret_val = env->hvien;
3599 }
3600
3601 env->hvien = (env->hvien & ~mask) | (new_val & mask);
3602
3603 return RISCV_EXCP_NONE;
3604 }
3605
rmw_hvien(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3606 static RISCVException rmw_hvien(CPURISCVState *env, int csrno,
3607 target_ulong *ret_val,
3608 target_ulong new_val, target_ulong wr_mask)
3609 {
3610 uint64_t rval;
3611 RISCVException ret;
3612
3613 ret = rmw_hvien64(env, csrno, &rval, new_val, wr_mask);
3614 if (ret_val) {
3615 *ret_val = rval;
3616 }
3617
3618 return ret;
3619 }
3620
rmw_hvienh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3621 static RISCVException rmw_hvienh(CPURISCVState *env, int csrno,
3622 target_ulong *ret_val,
3623 target_ulong new_val, target_ulong wr_mask)
3624 {
3625 uint64_t rval;
3626 RISCVException ret;
3627
3628 ret = rmw_hvien64(env, csrno, &rval,
3629 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3630 if (ret_val) {
3631 *ret_val = rval >> 32;
3632 }
3633
3634 return ret;
3635 }
3636
3637 static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno,
3638 uint64_t *ret_val,
3639 uint64_t new_val, uint64_t wr_mask)
3640 {
3641 uint64_t mask = wr_mask & vs_delegable_ints;
3642
3643 if (ret_val) {
3644 *ret_val = env->hideleg & vs_delegable_ints;
3645 }
3646
3647 env->hideleg = (env->hideleg & ~mask) | (new_val & mask);
3648 return RISCV_EXCP_NONE;
3649 }
3650
3651 static RISCVException rmw_hideleg(CPURISCVState *env, int csrno,
3652 target_ulong *ret_val,
3653 target_ulong new_val, target_ulong wr_mask)
3654 {
3655 uint64_t rval;
3656 RISCVException ret;
3657
3658 ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask);
3659 if (ret_val) {
3660 *ret_val = rval;
3661 }
3662
3663 return ret;
3664 }
3665
3666 static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno,
3667 target_ulong *ret_val,
3668 target_ulong new_val, target_ulong wr_mask)
3669 {
3670 uint64_t rval;
3671 RISCVException ret;
3672
3673 ret = rmw_hideleg64(env, csrno, &rval,
3674 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3675 if (ret_val) {
3676 *ret_val = rval >> 32;
3677 }
3678
3679 return ret;
3680 }
3681
3682 /*
3683  * The function is written for two use-cases:
3684  * 1- To access the hvip CSR as-is, for HS-mode accesses.
3685  * 2- To access vsip as a combination of hvip and mip, for VS-mode accesses.
3686 *
3687 * Both report bits 2, 6, 10 and 13:63.
3688 * vsip needs to be read-only zero when both hideleg[i] and
3689 * hvien[i] are zero.
3690 */
3691 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
3692 uint64_t *ret_val,
3693 uint64_t new_val, uint64_t wr_mask)
3694 {
3695 RISCVException ret;
3696 uint64_t old_hvip;
3697 uint64_t ret_mip;
3698
3699 /*
3700 * For bits 10, 6 and 2, vsip[i] is an alias of hip[i]. These bits are
3701  * present in hip, hvip and mip, where mip[i] is an alias of hip[i] and
3702  * hvip[i] is OR'ed into hip[i] to inject virtual interrupts from the
3703  * hypervisor. These bits are actually maintained in mip, so we read them
3704  * from there. This way we have a single source of truth, which simplifies
3705  * the implementation.
3706 *
3707 * For bits 13:63 we have:
3708 *
3709 * hideleg[i] hvien[i]
3710 * 0 0 No delegation. vsip[i] readonly zero.
3711 * 0 1 vsip[i] is alias of hvip[i], sip bypassed.
3712 * 1 X vsip[i] is alias of sip[i], hvip bypassed.
3713 *
3714 * alias_mask denotes the bits that come from sip (mip here given we
3715 * maintain all bits there). nalias_mask denotes bits that come from
3716 * hvip.
3717 */
3718 uint64_t alias_mask = (env->hideleg | ~env->hvien) | VS_MODE_INTERRUPTS;
3719 uint64_t nalias_mask = (~env->hideleg & env->hvien);
3720 uint64_t wr_mask_hvip;
3721 uint64_t wr_mask_mip;
3722
3723 /*
3724  * Both the alias and non-alias masks remain the same for vsip, except:
3725 * 1- For VS* bits if they are zero in hideleg.
3726 * 2- For 13:63 bits if they are zero in both hideleg and hvien.
3727 */
3728 if (csrno == CSR_VSIP) {
3729 /* zero-out VS* bits that are not delegated to VS mode. */
3730 alias_mask &= (env->hideleg | ~VS_MODE_INTERRUPTS);
3731
3732 /*
3733 * zero-out 13:63 bits that are zero in both hideleg and hvien.
3734  * nalias_mask cannot contain any VS* bits, so only the second
3735  * condition applies to it.
3736 */
3737 nalias_mask &= (env->hideleg | env->hvien);
3738 alias_mask &= (env->hideleg | env->hvien);
3739 }
3740
3741 wr_mask_hvip = wr_mask & nalias_mask & hvip_writable_mask;
3742 wr_mask_mip = wr_mask & alias_mask & hvip_writable_mask;
3743
3744 /* Aliased bits, bits 10, 6, 2 need to come from mip. */
3745 ret = rmw_mip64(env, csrno, &ret_mip, new_val, wr_mask_mip);
3746 if (ret != RISCV_EXCP_NONE) {
3747 return ret;
3748 }
3749
3750 old_hvip = env->hvip;
3751
3752 if (wr_mask_hvip) {
3753 env->hvip = (env->hvip & ~wr_mask_hvip) | (new_val & wr_mask_hvip);
3754
3755 /*
3756  * Given hvip is a separate source from mip, we need to trigger interrupts
3757  * from here separately. Normally this happens from riscv_cpu_update_mip.
3758 */
3759 riscv_cpu_interrupt(env);
3760 }
3761
3762 if (ret_val) {
3763 /* Only take VS* bits from mip. */
3764 ret_mip &= alias_mask;
3765
3766 /* Take in non-delegated 13:63 bits from hvip. */
3767 old_hvip &= nalias_mask;
3768
3769 *ret_val = ret_mip | old_hvip;
3770 }
3771
3772 return ret;
3773 }
3774
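/*
 * A small worked example of the masking above (illustrative values only):
 *
 *     hideleg = VS_MODE_INTERRUPTS;   // VS* bits (2, 6, 10) delegated
 *     hvien   = BIT(13);              // a local interrupt injected via hvip
 *
 * Then alias_mask covers the VS* bits (served from mip) and nalias_mask
 * covers bit 13 (served from hvip).  If neither hideleg[13] nor hvien[13]
 * were set, bit 13 of vsip would read as zero and writes to it would be
 * dropped.
 */
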
3775 static RISCVException rmw_hvip(CPURISCVState *env, int csrno,
3776 target_ulong *ret_val,
3777 target_ulong new_val, target_ulong wr_mask)
3778 {
3779 uint64_t rval;
3780 RISCVException ret;
3781
3782 ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask);
3783 if (ret_val) {
3784 *ret_val = rval;
3785 }
3786
3787 return ret;
3788 }
3789
3790 static RISCVException rmw_hviph(CPURISCVState *env, int csrno,
3791 target_ulong *ret_val,
3792 target_ulong new_val, target_ulong wr_mask)
3793 {
3794 uint64_t rval;
3795 RISCVException ret;
3796
3797 ret = rmw_hvip64(env, csrno, &rval,
3798 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3799 if (ret_val) {
3800 *ret_val = rval >> 32;
3801 }
3802
3803 return ret;
3804 }
3805
3806 static RISCVException rmw_hip(CPURISCVState *env, int csrno,
3807 target_ulong *ret_value,
3808 target_ulong new_value, target_ulong write_mask)
3809 {
3810 int ret = rmw_mip(env, csrno, ret_value, new_value,
3811 write_mask & hip_writable_mask);
3812
3813 if (ret_value) {
3814 *ret_value &= HS_MODE_INTERRUPTS;
3815 }
3816 return ret;
3817 }
3818
3819 static RISCVException rmw_hie(CPURISCVState *env, int csrno,
3820 target_ulong *ret_val,
3821 target_ulong new_val, target_ulong wr_mask)
3822 {
3823 uint64_t rval;
3824 RISCVException ret;
3825
3826 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS);
3827 if (ret_val) {
3828 *ret_val = rval & HS_MODE_INTERRUPTS;
3829 }
3830
3831 return ret;
3832 }
3833
3834 static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
3835 target_ulong *val)
3836 {
3837 *val = env->hcounteren;
3838 return RISCV_EXCP_NONE;
3839 }
3840
3841 static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
3842 target_ulong val)
3843 {
3844 RISCVCPU *cpu = env_archcpu(env);
3845
3846 /* WARL register - disable unavailable counters */
3847 env->hcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
3848 COUNTEREN_IR);
3849 return RISCV_EXCP_NONE;
3850 }
3851
3852 static RISCVException read_hgeie(CPURISCVState *env, int csrno,
3853 target_ulong *val)
3854 {
3855 if (val) {
3856 *val = env->hgeie;
3857 }
3858 return RISCV_EXCP_NONE;
3859 }
3860
3861 static RISCVException write_hgeie(CPURISCVState *env, int csrno,
3862 target_ulong val)
3863 {
3864 /* Only bits GEILEN:1 are writable; bit 0 is hardwired to zero */
3865 val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
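    /* e.g. with geilen == 2 only bits 2:1 survive, i.e. val &= 0x6 */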
3866 env->hgeie = val;
3867 /* Update mip.SGEIP bit */
3868 riscv_cpu_update_mip(env, MIP_SGEIP,
3869 BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
3870 return RISCV_EXCP_NONE;
3871 }
3872
3873 static RISCVException read_htval(CPURISCVState *env, int csrno,
3874 target_ulong *val)
3875 {
3876 *val = env->htval;
3877 return RISCV_EXCP_NONE;
3878 }
3879
3880 static RISCVException write_htval(CPURISCVState *env, int csrno,
3881 target_ulong val)
3882 {
3883 env->htval = val;
3884 return RISCV_EXCP_NONE;
3885 }
3886
3887 static RISCVException read_htinst(CPURISCVState *env, int csrno,
3888 target_ulong *val)
3889 {
3890 *val = env->htinst;
3891 return RISCV_EXCP_NONE;
3892 }
3893
3894 static RISCVException write_htinst(CPURISCVState *env, int csrno,
3895 target_ulong val)
3896 {
3897 return RISCV_EXCP_NONE;
3898 }
3899
3900 static RISCVException read_hgeip(CPURISCVState *env, int csrno,
3901 target_ulong *val)
3902 {
3903 if (val) {
3904 *val = env->hgeip;
3905 }
3906 return RISCV_EXCP_NONE;
3907 }
3908
3909 static RISCVException read_hgatp(CPURISCVState *env, int csrno,
3910 target_ulong *val)
3911 {
3912 *val = env->hgatp;
3913 return RISCV_EXCP_NONE;
3914 }
3915
3916 static RISCVException write_hgatp(CPURISCVState *env, int csrno,
3917 target_ulong val)
3918 {
3919 env->hgatp = legalize_xatp(env, env->hgatp, val);
3920 return RISCV_EXCP_NONE;
3921 }
3922
3923 static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
3924 target_ulong *val)
3925 {
3926 if (!env->rdtime_fn) {
3927 return RISCV_EXCP_ILLEGAL_INST;
3928 }
3929
3930 *val = env->htimedelta;
3931 return RISCV_EXCP_NONE;
3932 }
3933
3934 static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
3935 target_ulong val)
3936 {
3937 if (!env->rdtime_fn) {
3938 return RISCV_EXCP_ILLEGAL_INST;
3939 }
3940
3941 if (riscv_cpu_mxl(env) == MXL_RV32) {
3942 env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
3943 } else {
3944 env->htimedelta = val;
3945 }
3946
3947 if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) {
3948 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
3949 env->htimedelta, MIP_VSTIP);
3950 }
3951
3952 return RISCV_EXCP_NONE;
3953 }
3954
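/*
 * htimedelta is the offset added to the host time value seen by the guest,
 * i.e. roughly vstime = time + htimedelta, which is why the vstimecmp timer
 * is re-armed above whenever the delta changes and Sstc is enabled.
 */
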
3955 static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
3956 target_ulong *val)
3957 {
3958 if (!env->rdtime_fn) {
3959 return RISCV_EXCP_ILLEGAL_INST;
3960 }
3961
3962 *val = env->htimedelta >> 32;
3963 return RISCV_EXCP_NONE;
3964 }
3965
3966 static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
3967 target_ulong val)
3968 {
3969 if (!env->rdtime_fn) {
3970 return RISCV_EXCP_ILLEGAL_INST;
3971 }
3972
3973 env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);
3974
3975 if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) {
3976 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
3977 env->htimedelta, MIP_VSTIP);
3978 }
3979
3980 return RISCV_EXCP_NONE;
3981 }
3982
3983 static RISCVException read_hvictl(CPURISCVState *env, int csrno,
3984 target_ulong *val)
3985 {
3986 *val = env->hvictl;
3987 return RISCV_EXCP_NONE;
3988 }
3989
3990 static RISCVException write_hvictl(CPURISCVState *env, int csrno,
3991 target_ulong val)
3992 {
3993 env->hvictl = val & HVICTL_VALID_MASK;
3994 return RISCV_EXCP_NONE;
3995 }
3996
3997 static RISCVException read_hvipriox(CPURISCVState *env, int first_index,
3998 uint8_t *iprio, target_ulong *val)
3999 {
4000 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
4001
4002 /* First index has to be a multiple of the number of irqs per register */
4003 if (first_index % num_irqs) {
4004 return (env->virt_enabled) ?
4005 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
4006 }
4007
4008 /* Fill-up return value */
4009 *val = 0;
4010 for (i = 0; i < num_irqs; i++) {
4011 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
4012 continue;
4013 }
4014 if (rdzero) {
4015 continue;
4016 }
4017 *val |= ((target_ulong)iprio[irq]) << (i * 8);
4018 }
4019
4020 return RISCV_EXCP_NONE;
4021 }
4022
4023 static RISCVException write_hvipriox(CPURISCVState *env, int first_index,
4024 uint8_t *iprio, target_ulong val)
4025 {
4026 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
4027
4028 /* First index has to be a multiple of the number of irqs per register */
4029 if (first_index % num_irqs) {
4030 return (env->virt_enabled) ?
4031 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
4032 }
4033
4034 /* Fill-up priority array */
4035 for (i = 0; i < num_irqs; i++) {
4036 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
4037 continue;
4038 }
4039 if (rdzero) {
4040 iprio[irq] = 0;
4041 } else {
4042 iprio[irq] = (val >> (i * 8)) & 0xff;
4043 }
4044 }
4045
4046 return RISCV_EXCP_NONE;
4047 }
4048
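/*
 * Packing example (illustrative): each hviprioX CSR holds one byte of
 * priority per minor interrupt, i.e. 4 entries on RV32 and 8 on RV64.
 * On RV64, hviprio1 (first_index == 0) therefore covers indexes 0..7, with
 * byte i holding the priority of the interrupt that
 * riscv_cpu_hviprio_index2irq() maps index first_index + i to;
 * read-only-zero slots are simply skipped.
 */
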
4049 static RISCVException read_hviprio1(CPURISCVState *env, int csrno,
4050 target_ulong *val)
4051 {
4052 return read_hvipriox(env, 0, env->hviprio, val);
4053 }
4054
4055 static RISCVException write_hviprio1(CPURISCVState *env, int csrno,
4056 target_ulong val)
4057 {
4058 return write_hvipriox(env, 0, env->hviprio, val);
4059 }
4060
4061 static RISCVException read_hviprio1h(CPURISCVState *env, int csrno,
4062 target_ulong *val)
4063 {
4064 return read_hvipriox(env, 4, env->hviprio, val);
4065 }
4066
4067 static RISCVException write_hviprio1h(CPURISCVState *env, int csrno,
4068 target_ulong val)
4069 {
4070 return write_hvipriox(env, 4, env->hviprio, val);
4071 }
4072
4073 static RISCVException read_hviprio2(CPURISCVState *env, int csrno,
4074 target_ulong *val)
4075 {
4076 return read_hvipriox(env, 8, env->hviprio, val);
4077 }
4078
4079 static RISCVException write_hviprio2(CPURISCVState *env, int csrno,
4080 target_ulong val)
4081 {
4082 return write_hvipriox(env, 8, env->hviprio, val);
4083 }
4084
4085 static RISCVException read_hviprio2h(CPURISCVState *env, int csrno,
4086 target_ulong *val)
4087 {
4088 return read_hvipriox(env, 12, env->hviprio, val);
4089 }
4090
4091 static RISCVException write_hviprio2h(CPURISCVState *env, int csrno,
4092 target_ulong val)
4093 {
4094 return write_hvipriox(env, 12, env->hviprio, val);
4095 }
4096
4097 /* Virtual CSR Registers */
4098 static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
4099 target_ulong *val)
4100 {
4101 *val = env->vsstatus;
4102 return RISCV_EXCP_NONE;
4103 }
4104
4105 static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
4106 target_ulong val)
4107 {
4108 uint64_t mask = (target_ulong)-1;
4109 if ((val & VSSTATUS64_UXL) == 0) {
4110 mask &= ~VSSTATUS64_UXL;
4111 }
4112 env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
4113 return RISCV_EXCP_NONE;
4114 }
4115
4116 static RISCVException read_vstvec(CPURISCVState *env, int csrno,
4117 target_ulong *val)
4118 {
4119 *val = env->vstvec;
4120 return RISCV_EXCP_NONE;
4121 }
4122
4123 static RISCVException write_vstvec(CPURISCVState *env, int csrno,
4124 target_ulong val)
4125 {
4126 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
4127 if ((val & 3) < 2) {
4128 env->vstvec = val;
4129 } else {
4130 qemu_log_mask(LOG_UNIMP, "CSR_VSTVEC: reserved mode not supported\n");
4131 }
4132 return RISCV_EXCP_NONE;
4133 }
4134
4135 static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
4136 target_ulong *val)
4137 {
4138 *val = env->vsscratch;
4139 return RISCV_EXCP_NONE;
4140 }
4141
4142 static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
4143 target_ulong val)
4144 {
4145 env->vsscratch = val;
4146 return RISCV_EXCP_NONE;
4147 }
4148
4149 static RISCVException read_vsepc(CPURISCVState *env, int csrno,
4150 target_ulong *val)
4151 {
4152 *val = env->vsepc;
4153 return RISCV_EXCP_NONE;
4154 }
4155
4156 static RISCVException write_vsepc(CPURISCVState *env, int csrno,
4157 target_ulong val)
4158 {
4159 env->vsepc = val;
4160 return RISCV_EXCP_NONE;
4161 }
4162
4163 static RISCVException read_vscause(CPURISCVState *env, int csrno,
4164 target_ulong *val)
4165 {
4166 *val = env->vscause;
4167 return RISCV_EXCP_NONE;
4168 }
4169
4170 static RISCVException write_vscause(CPURISCVState *env, int csrno,
4171 target_ulong val)
4172 {
4173 env->vscause = val;
4174 return RISCV_EXCP_NONE;
4175 }
4176
4177 static RISCVException read_vstval(CPURISCVState *env, int csrno,
4178 target_ulong *val)
4179 {
4180 *val = env->vstval;
4181 return RISCV_EXCP_NONE;
4182 }
4183
4184 static RISCVException write_vstval(CPURISCVState *env, int csrno,
4185 target_ulong val)
4186 {
4187 env->vstval = val;
4188 return RISCV_EXCP_NONE;
4189 }
4190
4191 static RISCVException read_vsatp(CPURISCVState *env, int csrno,
4192 target_ulong *val)
4193 {
4194 *val = env->vsatp;
4195 return RISCV_EXCP_NONE;
4196 }
4197
4198 static RISCVException write_vsatp(CPURISCVState *env, int csrno,
4199 target_ulong val)
4200 {
4201 env->vsatp = legalize_xatp(env, env->vsatp, val);
4202 return RISCV_EXCP_NONE;
4203 }
4204
4205 static RISCVException read_mtval2(CPURISCVState *env, int csrno,
4206 target_ulong *val)
4207 {
4208 *val = env->mtval2;
4209 return RISCV_EXCP_NONE;
4210 }
4211
4212 static RISCVException write_mtval2(CPURISCVState *env, int csrno,
4213 target_ulong val)
4214 {
4215 env->mtval2 = val;
4216 return RISCV_EXCP_NONE;
4217 }
4218
4219 static RISCVException read_mtinst(CPURISCVState *env, int csrno,
4220 target_ulong *val)
4221 {
4222 *val = env->mtinst;
4223 return RISCV_EXCP_NONE;
4224 }
4225
4226 static RISCVException write_mtinst(CPURISCVState *env, int csrno,
4227 target_ulong val)
4228 {
4229 env->mtinst = val;
4230 return RISCV_EXCP_NONE;
4231 }
4232
4233 /* Physical Memory Protection */
4234 static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
4235 target_ulong *val)
4236 {
4237 *val = mseccfg_csr_read(env);
4238 return RISCV_EXCP_NONE;
4239 }
4240
4241 static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
4242 target_ulong val)
4243 {
4244 mseccfg_csr_write(env, val);
4245 return RISCV_EXCP_NONE;
4246 }
4247
4248 static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
4249 target_ulong *val)
4250 {
4251 uint32_t reg_index = csrno - CSR_PMPCFG0;
4252
4253 *val = pmpcfg_csr_read(env, reg_index);
4254 return RISCV_EXCP_NONE;
4255 }
4256
4257 static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
4258 target_ulong val)
4259 {
4260 uint32_t reg_index = csrno - CSR_PMPCFG0;
4261
4262 pmpcfg_csr_write(env, reg_index, val);
4263 return RISCV_EXCP_NONE;
4264 }
4265
4266 static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
4267 target_ulong *val)
4268 {
4269 *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
4270 return RISCV_EXCP_NONE;
4271 }
4272
4273 static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
4274 target_ulong val)
4275 {
4276 pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
4277 return RISCV_EXCP_NONE;
4278 }
4279
4280 static RISCVException read_tselect(CPURISCVState *env, int csrno,
4281 target_ulong *val)
4282 {
4283 *val = tselect_csr_read(env);
4284 return RISCV_EXCP_NONE;
4285 }
4286
4287 static RISCVException write_tselect(CPURISCVState *env, int csrno,
4288 target_ulong val)
4289 {
4290 tselect_csr_write(env, val);
4291 return RISCV_EXCP_NONE;
4292 }
4293
4294 static RISCVException read_tdata(CPURISCVState *env, int csrno,
4295 target_ulong *val)
4296 {
4297 /* return 0 in tdata1 to end the trigger enumeration */
4298 if (env->trigger_cur >= RV_MAX_TRIGGERS && csrno == CSR_TDATA1) {
4299 *val = 0;
4300 return RISCV_EXCP_NONE;
4301 }
4302
4303 if (!tdata_available(env, csrno - CSR_TDATA1)) {
4304 return RISCV_EXCP_ILLEGAL_INST;
4305 }
4306
4307 *val = tdata_csr_read(env, csrno - CSR_TDATA1);
4308 return RISCV_EXCP_NONE;
4309 }
4310
4311 static RISCVException write_tdata(CPURISCVState *env, int csrno,
4312 target_ulong val)
4313 {
4314 if (!tdata_available(env, csrno - CSR_TDATA1)) {
4315 return RISCV_EXCP_ILLEGAL_INST;
4316 }
4317
4318 tdata_csr_write(env, csrno - CSR_TDATA1, val);
4319 return RISCV_EXCP_NONE;
4320 }
4321
4322 static RISCVException read_tinfo(CPURISCVState *env, int csrno,
4323 target_ulong *val)
4324 {
4325 *val = tinfo_csr_read(env);
4326 return RISCV_EXCP_NONE;
4327 }
4328
4329 static RISCVException read_mcontext(CPURISCVState *env, int csrno,
4330 target_ulong *val)
4331 {
4332 *val = env->mcontext;
4333 return RISCV_EXCP_NONE;
4334 }
4335
4336 static RISCVException write_mcontext(CPURISCVState *env, int csrno,
4337 target_ulong val)
4338 {
4339 bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;
4340 int32_t mask;
4341
4342 if (riscv_has_ext(env, RVH)) {
4343 /* Spec suggests 7-bit for RV32 and 14-bit for RV64 w/ H extension */
4344 mask = rv32 ? MCONTEXT32_HCONTEXT : MCONTEXT64_HCONTEXT;
4345 } else {
4346 /* Spec suggests 6-bit for RV32 and 13-bit for RV64 w/o H extension */
4347 mask = rv32 ? MCONTEXT32 : MCONTEXT64;
4348 }
4349
4350 env->mcontext = val & mask;
4351 return RISCV_EXCP_NONE;
4352 }
4353
4354 /*
4355  * Functions to access the Pointer Masking feature registers.
4356  * We have to check whether the current privilege level is allowed to
4357  * modify a CSR of the given mode.
4358 */
4359 static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
4360 {
4361 int csr_priv = get_field(csrno, 0x300);
4362 int pm_current;
4363
4364 if (env->debugger) {
4365 return false;
4366 }
4367 /*
4368  * If the privilege levels differ, we're accessing the CSR from a higher
4369  * privilege level, so allow the access.
4370 */
4371 if (env->priv != csr_priv) {
4372 return false;
4373 }
4374 switch (env->priv) {
4375 case PRV_M:
4376 pm_current = get_field(env->mmte, M_PM_CURRENT);
4377 break;
4378 case PRV_S:
4379 pm_current = get_field(env->mmte, S_PM_CURRENT);
4380 break;
4381 case PRV_U:
4382 pm_current = get_field(env->mmte, U_PM_CURRENT);
4383 break;
4384 default:
4385 g_assert_not_reached();
4386 }
4387 /* Same privilege level, so allow modifying the CSR only if pm.current == 1 */
4388 return !pm_current;
4389 }
4390
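/*
 * Example of the gating above (illustrative): an S-mode write to the S-mode
 * pointer-masking CSRs (smte, spmmask, spmbase) is silently dropped while
 * the S_PM_CURRENT bit of mmte is zero, whereas the same registers written
 * from M-mode (a higher privilege level) are always accepted.
 */
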
4391 static RISCVException read_mmte(CPURISCVState *env, int csrno,
4392 target_ulong *val)
4393 {
4394 *val = env->mmte & MMTE_MASK;
4395 return RISCV_EXCP_NONE;
4396 }
4397
4398 static RISCVException write_mmte(CPURISCVState *env, int csrno,
4399 target_ulong val)
4400 {
4401 uint64_t mstatus;
4402 target_ulong wpri_val = val & MMTE_MASK;
4403
4404 if (val != wpri_val) {
4405 qemu_log_mask(LOG_GUEST_ERROR,
4406               "MMTE: WPRI violation written 0x" TARGET_FMT_lx
4407               " vs expected 0x" TARGET_FMT_lx "\n", val, wpri_val);
4408 }
4409 /* for machine mode pm.current is hardwired to 1 */
4410 wpri_val |= MMTE_M_PM_CURRENT;
4411
4412 /* hardwiring pm.instruction bit to 0, since it's not supported yet */
4413 wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
4414 env->mmte = wpri_val | EXT_STATUS_DIRTY;
4415 riscv_cpu_update_mask(env);
4416
4417 /* Set XS and SD bits, since PM CSRs are dirty */
4418 mstatus = env->mstatus | MSTATUS_XS;
4419 write_mstatus(env, csrno, mstatus);
4420 return RISCV_EXCP_NONE;
4421 }
4422
4423 static RISCVException read_smte(CPURISCVState *env, int csrno,
4424 target_ulong *val)
4425 {
4426 *val = env->mmte & SMTE_MASK;
4427 return RISCV_EXCP_NONE;
4428 }
4429
4430 static RISCVException write_smte(CPURISCVState *env, int csrno,
4431 target_ulong val)
4432 {
4433 target_ulong wpri_val = val & SMTE_MASK;
4434
4435 if (val != wpri_val) {
4436 qemu_log_mask(LOG_GUEST_ERROR,
4437               "SMTE: WPRI violation written 0x" TARGET_FMT_lx
4438               " vs expected 0x" TARGET_FMT_lx "\n", val, wpri_val);
4439 }
4440
4441 /* if pm.current==0 we can't modify current PM CSRs */
4442 if (check_pm_current_disabled(env, csrno)) {
4443 return RISCV_EXCP_NONE;
4444 }
4445
4446 wpri_val |= (env->mmte & ~SMTE_MASK);
4447 write_mmte(env, csrno, wpri_val);
4448 return RISCV_EXCP_NONE;
4449 }
4450
4451 static RISCVException read_umte(CPURISCVState *env, int csrno,
4452 target_ulong *val)
4453 {
4454 *val = env->mmte & UMTE_MASK;
4455 return RISCV_EXCP_NONE;
4456 }
4457
4458 static RISCVException write_umte(CPURISCVState *env, int csrno,
4459 target_ulong val)
4460 {
4461 target_ulong wpri_val = val & UMTE_MASK;
4462
4463 if (val != wpri_val) {
4464 qemu_log_mask(LOG_GUEST_ERROR,
4465               "UMTE: WPRI violation written 0x" TARGET_FMT_lx
4466               " vs expected 0x" TARGET_FMT_lx "\n", val, wpri_val);
4467 }
4468
4469 if (check_pm_current_disabled(env, csrno)) {
4470 return RISCV_EXCP_NONE;
4471 }
4472
4473 wpri_val |= (env->mmte & ~UMTE_MASK);
4474 write_mmte(env, csrno, wpri_val);
4475 return RISCV_EXCP_NONE;
4476 }
4477
4478 static RISCVException read_mpmmask(CPURISCVState *env, int csrno,
4479 target_ulong *val)
4480 {
4481 *val = env->mpmmask;
4482 return RISCV_EXCP_NONE;
4483 }
4484
4485 static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
4486 target_ulong val)
4487 {
4488 uint64_t mstatus;
4489
4490 env->mpmmask = val;
4491 if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
4492 env->cur_pmmask = val;
4493 }
4494 env->mmte |= EXT_STATUS_DIRTY;
4495
4496 /* Set XS and SD bits, since PM CSRs are dirty */
4497 mstatus = env->mstatus | MSTATUS_XS;
4498 write_mstatus(env, csrno, mstatus);
4499 return RISCV_EXCP_NONE;
4500 }
4501
4502 static RISCVException read_spmmask(CPURISCVState *env, int csrno,
4503 target_ulong *val)
4504 {
4505 *val = env->spmmask;
4506 return RISCV_EXCP_NONE;
4507 }
4508
4509 static RISCVException write_spmmask(CPURISCVState *env, int csrno,
4510 target_ulong val)
4511 {
4512 uint64_t mstatus;
4513
4514 /* if pm.current==0 we can't modify current PM CSRs */
4515 if (check_pm_current_disabled(env, csrno)) {
4516 return RISCV_EXCP_NONE;
4517 }
4518 env->spmmask = val;
4519 if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
4520 env->cur_pmmask = val;
4521 if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
4522 env->cur_pmmask &= UINT32_MAX;
4523 }
4524 }
4525 env->mmte |= EXT_STATUS_DIRTY;
4526
4527 /* Set XS and SD bits, since PM CSRs are dirty */
4528 mstatus = env->mstatus | MSTATUS_XS;
4529 write_mstatus(env, csrno, mstatus);
4530 return RISCV_EXCP_NONE;
4531 }
4532
4533 static RISCVException read_upmmask(CPURISCVState *env, int csrno,
4534 target_ulong *val)
4535 {
4536 *val = env->upmmask;
4537 return RISCV_EXCP_NONE;
4538 }
4539
4540 static RISCVException write_upmmask(CPURISCVState *env, int csrno,
4541 target_ulong val)
4542 {
4543 uint64_t mstatus;
4544
4545 /* if pm.current==0 we can't modify current PM CSRs */
4546 if (check_pm_current_disabled(env, csrno)) {
4547 return RISCV_EXCP_NONE;
4548 }
4549 env->upmmask = val;
4550 if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
4551 env->cur_pmmask = val;
4552 if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
4553 env->cur_pmmask &= UINT32_MAX;
4554 }
4555 }
4556 env->mmte |= EXT_STATUS_DIRTY;
4557
4558 /* Set XS and SD bits, since PM CSRs are dirty */
4559 mstatus = env->mstatus | MSTATUS_XS;
4560 write_mstatus(env, csrno, mstatus);
4561 return RISCV_EXCP_NONE;
4562 }
4563
4564 static RISCVException read_mpmbase(CPURISCVState *env, int csrno,
4565 target_ulong *val)
4566 {
4567 *val = env->mpmbase;
4568 return RISCV_EXCP_NONE;
4569 }
4570
4571 static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
4572 target_ulong val)
4573 {
4574 uint64_t mstatus;
4575
4576 env->mpmbase = val;
4577 if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
4578 env->cur_pmbase = val;
4579 }
4580 env->mmte |= EXT_STATUS_DIRTY;
4581
4582 /* Set XS and SD bits, since PM CSRs are dirty */
4583 mstatus = env->mstatus | MSTATUS_XS;
4584 write_mstatus(env, csrno, mstatus);
4585 return RISCV_EXCP_NONE;
4586 }
4587
4588 static RISCVException read_spmbase(CPURISCVState *env, int csrno,
4589 target_ulong *val)
4590 {
4591 *val = env->spmbase;
4592 return RISCV_EXCP_NONE;
4593 }
4594
4595 static RISCVException write_spmbase(CPURISCVState *env, int csrno,
4596 target_ulong val)
4597 {
4598 uint64_t mstatus;
4599
4600 /* if pm.current==0 we can't modify current PM CSRs */
4601 if (check_pm_current_disabled(env, csrno)) {
4602 return RISCV_EXCP_NONE;
4603 }
4604 env->spmbase = val;
4605 if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
4606 env->cur_pmbase = val;
4607 if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
4608 env->cur_pmbase &= UINT32_MAX;
4609 }
4610 }
4611 env->mmte |= EXT_STATUS_DIRTY;
4612
4613 /* Set XS and SD bits, since PM CSRs are dirty */
4614 mstatus = env->mstatus | MSTATUS_XS;
4615 write_mstatus(env, csrno, mstatus);
4616 return RISCV_EXCP_NONE;
4617 }
4618
4619 static RISCVException read_upmbase(CPURISCVState *env, int csrno,
4620 target_ulong *val)
4621 {
4622 *val = env->upmbase;
4623 return RISCV_EXCP_NONE;
4624 }
4625
4626 static RISCVException write_upmbase(CPURISCVState *env, int csrno,
4627 target_ulong val)
4628 {
4629 uint64_t mstatus;
4630
4631 /* if pm.current==0 we can't modify current PM CSRs */
4632 if (check_pm_current_disabled(env, csrno)) {
4633 return RISCV_EXCP_NONE;
4634 }
4635 env->upmbase = val;
4636 if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
4637 env->cur_pmbase = val;
4638 if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
4639 env->cur_pmbase &= UINT32_MAX;
4640 }
4641 }
4642 env->mmte |= EXT_STATUS_DIRTY;
4643
4644 /* Set XS and SD bits, since PM CSRs are dirty */
4645 mstatus = env->mstatus | MSTATUS_XS;
4646 write_mstatus(env, csrno, mstatus);
4647 return RISCV_EXCP_NONE;
4648 }
4649
4650 #endif
4651
4652 /* Crypto Extension */
4653 target_ulong riscv_new_csr_seed(target_ulong new_value,
4654 target_ulong write_mask)
4655 {
4656 uint16_t random_v;
4657 Error *random_e = NULL;
4658 int random_r;
4659 target_ulong rval;
4660
4661 random_r = qemu_guest_getrandom(&random_v, 2, &random_e);
4662 if (unlikely(random_r < 0)) {
4663 /*
4664 * Failed, for unknown reasons in the crypto subsystem.
4665 * The best we can do is log the reason and return a
4666 * failure indication to the guest. There is no reason
4667 * we know to expect the failure to be transitory, so
4668 * indicate DEAD to avoid having the guest spin on WAIT.
4669 */
4670 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
4671 __func__, error_get_pretty(random_e));
4672 error_free(random_e);
4673 rval = SEED_OPST_DEAD;
4674 } else {
4675 rval = random_v | SEED_OPST_ES16;
4676 }
4677
4678 return rval;
4679 }
4680
4681 static RISCVException rmw_seed(CPURISCVState *env, int csrno,
4682 target_ulong *ret_value,
4683 target_ulong new_value,
4684 target_ulong write_mask)
4685 {
4686 target_ulong rval;
4687
4688 rval = riscv_new_csr_seed(new_value, write_mask);
4689
4690 if (ret_value) {
4691 *ret_value = rval;
4692 }
4693
4694 return RISCV_EXCP_NONE;
4695 }
4696
4697 /*
4698 * riscv_csrrw - read and/or update control and status register
4699 *
4700 * csrr <-> riscv_csrrw(env, csrno, ret_value, 0, 0);
4701 * csrrw <-> riscv_csrrw(env, csrno, ret_value, value, -1);
4702 * csrrs <-> riscv_csrrw(env, csrno, ret_value, -1, value);
4703 * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value);
4704 */
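/*
 * For example (an illustrative sketch, not a call site from this file),
 * a "csrrs rd, mstatus, rs1" corresponds to
 *
 *     riscv_csrrw(env, CSR_MSTATUS, &old, (target_ulong)-1, rs1_val);
 *
 * where rs1_val is the source register value: the previous CSR value comes
 * back through &old and only the bits set in the write mask are updated.
 */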
4705
4706 static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
4707 int csrno,
4708 bool write)
4709 {
4710 /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
4711 bool read_only = get_field(csrno, 0xC00) == 3;
4712 int csr_min_priv = csr_ops[csrno].min_priv_ver;
4713
4714 /* ensure the CSR extension is enabled */
4715 if (!riscv_cpu_cfg(env)->ext_zicsr) {
4716 return RISCV_EXCP_ILLEGAL_INST;
4717 }
4718
4719 /* ensure CSR is implemented by checking predicate */
4720 if (!csr_ops[csrno].predicate) {
4721 return RISCV_EXCP_ILLEGAL_INST;
4722 }
4723
4724 /* privileged spec version check */
4725 if (env->priv_ver < csr_min_priv) {
4726 return RISCV_EXCP_ILLEGAL_INST;
4727 }
4728
4729 /* read / write check */
4730 if (write && read_only) {
4731 return RISCV_EXCP_ILLEGAL_INST;
4732 }
4733
4734 /*
4735  * The predicate() not only checks that the CSR exists but also performs
4736  * access control checks, which in some cases trigger a virtual instruction
4737  * exception. When writing a read-only CSR in those cases, an illegal
4738  * instruction exception should be raised instead of a virtual instruction
4739  * exception. Hence this comes after the read / write check.
4740 */
4741 RISCVException ret = csr_ops[csrno].predicate(env, csrno);
4742 if (ret != RISCV_EXCP_NONE) {
4743 return ret;
4744 }
4745
4746 #if !defined(CONFIG_USER_ONLY)
4747 int csr_priv, effective_priv = env->priv;
4748
4749 if (riscv_has_ext(env, RVH) && env->priv == PRV_S &&
4750 !env->virt_enabled) {
4751 /*
4752 * We are in HS mode. Add 1 to the effective privilege level to
4753 * allow us to access the Hypervisor CSRs.
4754 */
4755 effective_priv++;
4756 }
4757
4758 csr_priv = get_field(csrno, 0x300);
4759 if (!env->debugger && (effective_priv < csr_priv)) {
4760 if (csr_priv == (PRV_S + 1) && env->virt_enabled) {
4761 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
4762 }
4763 return RISCV_EXCP_ILLEGAL_INST;
4764 }
4765 #endif
4766 return RISCV_EXCP_NONE;
4767 }
4768
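/*
 * Privilege-check example (illustrative): an access to an HS-level CSR such
 * as hstatus (csr_priv == PRV_S + 1) succeeds from HS-mode because
 * effective_priv is bumped from PRV_S to PRV_S + 1, while the same access
 * from VS-mode (virt_enabled) raises a virtual instruction fault rather
 * than an illegal instruction exception.
 */
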
4769 static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
4770 target_ulong *ret_value,
4771 target_ulong new_value,
4772 target_ulong write_mask)
4773 {
4774 RISCVException ret;
4775 target_ulong old_value = 0;
4776
4777 /* execute combined read/write operation if it exists */
4778 if (csr_ops[csrno].op) {
4779 return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
4780 }
4781
4782 /*
4783  * ret_value == NULL means that rd=x0 and we're coming from helper_csrw(),
4784  * so we must not trigger any side effects caused by CSR reads.
4785 */
4786 if (ret_value) {
4787 /* if no accessor exists then return failure */
4788 if (!csr_ops[csrno].read) {
4789 return RISCV_EXCP_ILLEGAL_INST;
4790 }
4791 /* read old value */
4792 ret = csr_ops[csrno].read(env, csrno, &old_value);
4793 if (ret != RISCV_EXCP_NONE) {
4794 return ret;
4795 }
4796 }
4797
4798 /* write value if writable and write mask set, otherwise drop writes */
4799 if (write_mask) {
4800 new_value = (old_value & ~write_mask) | (new_value & write_mask);
4801 if (csr_ops[csrno].write) {
4802 ret = csr_ops[csrno].write(env, csrno, new_value);
4803 if (ret != RISCV_EXCP_NONE) {
4804 return ret;
4805 }
4806 }
4807 }
4808
4809 /* return old value */
4810 if (ret_value) {
4811 *ret_value = old_value;
4812 }
4813
4814 return RISCV_EXCP_NONE;
4815 }
4816
4817 RISCVException riscv_csrr(CPURISCVState *env, int csrno,
4818 target_ulong *ret_value)
4819 {
4820 RISCVException ret = riscv_csrrw_check(env, csrno, false);
4821 if (ret != RISCV_EXCP_NONE) {
4822 return ret;
4823 }
4824
4825 return riscv_csrrw_do64(env, csrno, ret_value, 0, 0);
4826 }
4827
4828 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
4829 target_ulong *ret_value,
4830 target_ulong new_value, target_ulong write_mask)
4831 {
4832 RISCVException ret = riscv_csrrw_check(env, csrno, true);
4833 if (ret != RISCV_EXCP_NONE) {
4834 return ret;
4835 }
4836
4837 return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
4838 }
4839
4840 static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
4841 Int128 *ret_value,
4842 Int128 new_value,
4843 Int128 write_mask)
4844 {
4845 RISCVException ret;
4846 Int128 old_value;
4847
4848 /* read old value */
4849 ret = csr_ops[csrno].read128(env, csrno, &old_value);
4850 if (ret != RISCV_EXCP_NONE) {
4851 return ret;
4852 }
4853
4854 /* write value if writable and write mask set, otherwise drop writes */
4855 if (int128_nz(write_mask)) {
4856 new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
4857 int128_and(new_value, write_mask));
4858 if (csr_ops[csrno].write128) {
4859 ret = csr_ops[csrno].write128(env, csrno, new_value);
4860 if (ret != RISCV_EXCP_NONE) {
4861 return ret;
4862 }
4863 } else if (csr_ops[csrno].write) {
4864 /* avoids having to write wrappers for all registers */
4865 ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
4866 if (ret != RISCV_EXCP_NONE) {
4867 return ret;
4868 }
4869 }
4870 }
4871
4872 /* return old value */
4873 if (ret_value) {
4874 *ret_value = old_value;
4875 }
4876
4877 return RISCV_EXCP_NONE;
4878 }
4879
4880 RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
4881 Int128 *ret_value)
4882 {
4883 RISCVException ret;
4884
4885 ret = riscv_csrrw_check(env, csrno, false);
4886 if (ret != RISCV_EXCP_NONE) {
4887 return ret;
4888 }
4889
4890 if (csr_ops[csrno].read128) {
4891 return riscv_csrrw_do128(env, csrno, ret_value,
4892 int128_zero(), int128_zero());
4893 }
4894
4895 /*
4896  * Fall back to the 64-bit version for now, if the 128-bit alternative
4897  * isn't defined at all.
4898  * Note: some CSRs don't need to extend to MXLEN (the upper 64 bits are
4899  * not significant); for those, this fallback handles the accesses
4900  * correctly.
4901 */
4902 target_ulong old_value;
4903 ret = riscv_csrrw_do64(env, csrno, &old_value,
4904 (target_ulong)0,
4905 (target_ulong)0);
4906 if (ret == RISCV_EXCP_NONE && ret_value) {
4907 *ret_value = int128_make64(old_value);
4908 }
4909 return ret;
4910 }
4911
4912 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
4913 Int128 *ret_value,
4914 Int128 new_value, Int128 write_mask)
4915 {
4916 RISCVException ret;
4917
4918 ret = riscv_csrrw_check(env, csrno, true);
4919 if (ret != RISCV_EXCP_NONE) {
4920 return ret;
4921 }
4922
4923 if (csr_ops[csrno].read128) {
4924 return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
4925 }
4926
4927 /*
4928  * Fall back to the 64-bit version for now, if the 128-bit alternative
4929  * isn't defined at all.
4930  * Note: some CSRs don't need to extend to MXLEN (the upper 64 bits are
4931  * not significant); for those, this fallback handles the accesses
4932  * correctly.
4933 */
4934 target_ulong old_value;
4935 ret = riscv_csrrw_do64(env, csrno, &old_value,
4936 int128_getlo(new_value),
4937 int128_getlo(write_mask));
4938 if (ret == RISCV_EXCP_NONE && ret_value) {
4939 *ret_value = int128_make64(old_value);
4940 }
4941 return ret;
4942 }
4943
4944 /*
4945 * Debugger support. If not in user mode, set env->debugger before the
4946 * riscv_csrrw call and clear it after the call.
4947 */
4948 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
4949 target_ulong *ret_value,
4950 target_ulong new_value,
4951 target_ulong write_mask)
4952 {
4953 RISCVException ret;
4954 #if !defined(CONFIG_USER_ONLY)
4955 env->debugger = true;
4956 #endif
4957 if (!write_mask) {
4958 ret = riscv_csrr(env, csrno, ret_value);
4959 } else {
4960 ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
4961 }
4962 #if !defined(CONFIG_USER_ONLY)
4963 env->debugger = false;
4964 #endif
4965 return ret;
4966 }
4967
4968 static RISCVException read_jvt(CPURISCVState *env, int csrno,
4969 target_ulong *val)
4970 {
4971 *val = env->jvt;
4972 return RISCV_EXCP_NONE;
4973 }
4974
4975 static RISCVException write_jvt(CPURISCVState *env, int csrno,
4976 target_ulong val)
4977 {
4978 env->jvt = val;
4979 return RISCV_EXCP_NONE;
4980 }
4981
4982 /*
4983 * Control and Status Register function table
4984 * riscv_csr_operations::predicate() must be provided for an implemented CSR
4985 */
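/*
 * Entries below are positional: { name, predicate, read, write, op (rmw),
 * read128, write128 }, optionally followed by a designated .min_priv_ver;
 * omitted trailing fields default to NULL/0 (field order as used by the
 * initializers in this table).
 */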
4986 riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
4987 /* User Floating-Point CSRs */
4988 [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
4989 [CSR_FRM] = { "frm", fs, read_frm, write_frm },
4990 [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr },
4991 /* Vector CSRs */
4992 [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart },
4993 [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat },
4994 [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm },
4995 [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr },
4996 [CSR_VL] = { "vl", vs, read_vl },
4997 [CSR_VTYPE] = { "vtype", vs, read_vtype },
4998 [CSR_VLENB] = { "vlenb", vs, read_vlenb },
4999 /* User Timers and Counters */
5000 [CSR_CYCLE] = { "cycle", ctr, read_hpmcounter },
5001 [CSR_INSTRET] = { "instret", ctr, read_hpmcounter },
5002 [CSR_CYCLEH] = { "cycleh", ctr32, read_hpmcounterh },
5003 [CSR_INSTRETH] = { "instreth", ctr32, read_hpmcounterh },
5004
5005 /*
5006  * In privileged mode, the monitor will have to emulate TIME CSRs only if
5007  * the rdtime callback is not provided by machine/platform emulation.
5008 */
5009 [CSR_TIME] = { "time", ctr, read_time },
5010 [CSR_TIMEH] = { "timeh", ctr32, read_timeh },
5011
5012 /* Crypto Extension */
5013 [CSR_SEED] = { "seed", seed, NULL, NULL, rmw_seed },
5014
5015 /* Zcmt Extension */
5016 [CSR_JVT] = { "jvt", zcmt, read_jvt, write_jvt },
5017
5018 /* zicfiss Extension, shadow stack register */
5019 [CSR_SSP] = { "ssp", cfi_ss, read_ssp, write_ssp },
5020
5021 #if !defined(CONFIG_USER_ONLY)
5022 /* Machine Timers and Counters */
5023 [CSR_MCYCLE] = { "mcycle", any, read_hpmcounter,
5024 write_mhpmcounter },
5025 [CSR_MINSTRET] = { "minstret", any, read_hpmcounter,
5026 write_mhpmcounter },
5027 [CSR_MCYCLEH] = { "mcycleh", any32, read_hpmcounterh,
5028 write_mhpmcounterh },
5029 [CSR_MINSTRETH] = { "minstreth", any32, read_hpmcounterh,
5030 write_mhpmcounterh },
5031
5032 /* Machine Information Registers */
5033 [CSR_MVENDORID] = { "mvendorid", any, read_mvendorid },
5034 [CSR_MARCHID] = { "marchid", any, read_marchid },
5035 [CSR_MIMPID] = { "mimpid", any, read_mimpid },
5036 [CSR_MHARTID] = { "mhartid", any, read_mhartid },
5037
5038 [CSR_MCONFIGPTR] = { "mconfigptr", any, read_zero,
5039 .min_priv_ver = PRIV_VERSION_1_12_0 },
5040 /* Machine Trap Setup */
5041 [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus,
5042 NULL, read_mstatus_i128 },
5043 [CSR_MISA] = { "misa", any, read_misa, write_misa,
5044 NULL, read_misa_i128 },
5045 [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg },
5046 [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg },
5047 [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie },
5048 [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec },
5049 [CSR_MCOUNTEREN] = { "mcounteren", umode, read_mcounteren,
5050 write_mcounteren },
5051
5052 [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush,
5053 write_mstatush },
5054 [CSR_MEDELEGH] = { "medelegh", any32, read_zero, write_ignore,
5055 .min_priv_ver = PRIV_VERSION_1_13_0 },
5056 [CSR_HEDELEGH] = { "hedelegh", hmode32, read_hedelegh, write_hedelegh,
5057 .min_priv_ver = PRIV_VERSION_1_13_0 },
5058
5059 /* Machine Trap Handling */
5060 [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch,
5061 NULL, read_mscratch_i128, write_mscratch_i128 },
5062 [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc },
5063 [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause },
5064 [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval },
5065 [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
5066
5067 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */
5068 [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect },
5069 [CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg },
5070
5071 /* Machine-Level Interrupts (AIA) */
5072 [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
5073 [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },
5074
5075 /* Virtual Interrupts for Supervisor Level (AIA) */
5076 [CSR_MVIEN] = { "mvien", aia_any, NULL, NULL, rmw_mvien },
5077 [CSR_MVIP] = { "mvip", aia_any, NULL, NULL, rmw_mvip },
5078
5079 /* Machine-Level High-Half CSRs (AIA) */
5080 [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh },
5081 [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh },
5082 [CSR_MVIENH] = { "mvienh", aia_any32, NULL, NULL, rmw_mvienh },
5083 [CSR_MVIPH] = { "mviph", aia_any32, NULL, NULL, rmw_mviph },
5084 [CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph },
5085
5086 /* Execution environment configuration */
5087 [CSR_MENVCFG] = { "menvcfg", umode, read_menvcfg, write_menvcfg,
5088 .min_priv_ver = PRIV_VERSION_1_12_0 },
5089 [CSR_MENVCFGH] = { "menvcfgh", umode32, read_menvcfgh, write_menvcfgh,
5090 .min_priv_ver = PRIV_VERSION_1_12_0 },
5091 [CSR_SENVCFG] = { "senvcfg", smode, read_senvcfg, write_senvcfg,
5092 .min_priv_ver = PRIV_VERSION_1_12_0 },
5093 [CSR_HENVCFG] = { "henvcfg", hmode, read_henvcfg, write_henvcfg,
5094 .min_priv_ver = PRIV_VERSION_1_12_0 },
5095 [CSR_HENVCFGH] = { "henvcfgh", hmode32, read_henvcfgh, write_henvcfgh,
5096 .min_priv_ver = PRIV_VERSION_1_12_0 },
5097
5098 /* Smstateen extension CSRs */
5099 [CSR_MSTATEEN0] = { "mstateen0", mstateen, read_mstateen, write_mstateen0,
5100 .min_priv_ver = PRIV_VERSION_1_12_0 },
5101 [CSR_MSTATEEN0H] = { "mstateen0h", mstateen, read_mstateenh,
5102 write_mstateen0h,
5103 .min_priv_ver = PRIV_VERSION_1_12_0 },
5104 [CSR_MSTATEEN1] = { "mstateen1", mstateen, read_mstateen,
5105 write_mstateen_1_3,
5106 .min_priv_ver = PRIV_VERSION_1_12_0 },
5107 [CSR_MSTATEEN1H] = { "mstateen1h", mstateen, read_mstateenh,
5108 write_mstateenh_1_3,
5109 .min_priv_ver = PRIV_VERSION_1_12_0 },
5110 [CSR_MSTATEEN2] = { "mstateen2", mstateen, read_mstateen,
5111 write_mstateen_1_3,
5112 .min_priv_ver = PRIV_VERSION_1_12_0 },
5113 [CSR_MSTATEEN2H] = { "mstateen2h", mstateen, read_mstateenh,
5114 write_mstateenh_1_3,
5115 .min_priv_ver = PRIV_VERSION_1_12_0 },
5116 [CSR_MSTATEEN3] = { "mstateen3", mstateen, read_mstateen,
5117 write_mstateen_1_3,
5118 .min_priv_ver = PRIV_VERSION_1_12_0 },
5119 [CSR_MSTATEEN3H] = { "mstateen3h", mstateen, read_mstateenh,
5120 write_mstateenh_1_3,
5121 .min_priv_ver = PRIV_VERSION_1_12_0 },
5122 [CSR_HSTATEEN0] = { "hstateen0", hstateen, read_hstateen, write_hstateen0,
5123 .min_priv_ver = PRIV_VERSION_1_12_0 },
5124 [CSR_HSTATEEN0H] = { "hstateen0h", hstateenh, read_hstateenh,
5125 write_hstateen0h,
5126 .min_priv_ver = PRIV_VERSION_1_12_0 },
5127 [CSR_HSTATEEN1] = { "hstateen1", hstateen, read_hstateen,
5128 write_hstateen_1_3,
5129 .min_priv_ver = PRIV_VERSION_1_12_0 },
5130 [CSR_HSTATEEN1H] = { "hstateen1h", hstateenh, read_hstateenh,
5131 write_hstateenh_1_3,
5132 .min_priv_ver = PRIV_VERSION_1_12_0 },
5133 [CSR_HSTATEEN2] = { "hstateen2", hstateen, read_hstateen,
5134 write_hstateen_1_3,
5135 .min_priv_ver = PRIV_VERSION_1_12_0 },
5136 [CSR_HSTATEEN2H] = { "hstateen2h", hstateenh, read_hstateenh,
5137 write_hstateenh_1_3,
5138 .min_priv_ver = PRIV_VERSION_1_12_0 },
5139 [CSR_HSTATEEN3] = { "hstateen3", hstateen, read_hstateen,
5140 write_hstateen_1_3,
5141 .min_priv_ver = PRIV_VERSION_1_12_0 },
5142 [CSR_HSTATEEN3H] = { "hstateen3h", hstateenh, read_hstateenh,
5143 write_hstateenh_1_3,
5144 .min_priv_ver = PRIV_VERSION_1_12_0 },
5145 [CSR_SSTATEEN0] = { "sstateen0", sstateen, read_sstateen, write_sstateen0,
5146 .min_priv_ver = PRIV_VERSION_1_12_0 },
5147 [CSR_SSTATEEN1] = { "sstateen1", sstateen, read_sstateen,
5148 write_sstateen_1_3,
5149 .min_priv_ver = PRIV_VERSION_1_12_0 },
5150 [CSR_SSTATEEN2] = { "sstateen2", sstateen, read_sstateen,
5151 write_sstateen_1_3,
5152 .min_priv_ver = PRIV_VERSION_1_12_0 },
5153 [CSR_SSTATEEN3] = { "sstateen3", sstateen, read_sstateen,
5154 write_sstateen_1_3,
5155 .min_priv_ver = PRIV_VERSION_1_12_0 },
5156
5157 /* Supervisor Trap Setup */
5158 [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus,
5159 NULL, read_sstatus_i128 },
5160 [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie },
5161 [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec },
5162 [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren,
5163 write_scounteren },
5164
5165 /* Supervisor Trap Handling */
5166 [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch,
5167 NULL, read_sscratch_i128, write_sscratch_i128 },
5168 [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc },
5169 [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause },
5170 [CSR_STVAL] = { "stval", smode, read_stval, write_stval },
5171 [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip },
5172 [CSR_STIMECMP] = { "stimecmp", sstc, read_stimecmp, write_stimecmp,
5173 .min_priv_ver = PRIV_VERSION_1_12_0 },
5174 [CSR_STIMECMPH] = { "stimecmph", sstc_32, read_stimecmph, write_stimecmph,
5175 .min_priv_ver = PRIV_VERSION_1_12_0 },
5176 [CSR_VSTIMECMP] = { "vstimecmp", sstc, read_vstimecmp,
5177 write_vstimecmp,
5178 .min_priv_ver = PRIV_VERSION_1_12_0 },
5179 [CSR_VSTIMECMPH] = { "vstimecmph", sstc_32, read_vstimecmph,
5180 write_vstimecmph,
5181 .min_priv_ver = PRIV_VERSION_1_12_0 },
5182
5183 /* Supervisor Protection and Translation */
5184 [CSR_SATP] = { "satp", satp, read_satp, write_satp },
5185
5186 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
5187 [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect },
5188 [CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg },
5189
5190 /* Supervisor-Level Interrupts (AIA) */
5191 [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
5192 [CSR_STOPI] = { "stopi", aia_smode, read_stopi },
5193
5194 /* Supervisor-Level High-Half CSRs (AIA) */
5195 [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh },
5196 [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph },
5197
5198 [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus,
5199 .min_priv_ver = PRIV_VERSION_1_12_0 },
5200 [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg,
5201 .min_priv_ver = PRIV_VERSION_1_12_0 },
5202 [CSR_HIDELEG] = { "hideleg", hmode, NULL, NULL, rmw_hideleg,
5203 .min_priv_ver = PRIV_VERSION_1_12_0 },
5204 [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip,
5205 .min_priv_ver = PRIV_VERSION_1_12_0 },
5206 [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip,
5207 .min_priv_ver = PRIV_VERSION_1_12_0 },
5208 [CSR_HIE] = { "hie", hmode, NULL, NULL, rmw_hie,
5209 .min_priv_ver = PRIV_VERSION_1_12_0 },
5210 [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren,
5211 write_hcounteren,
5212 .min_priv_ver = PRIV_VERSION_1_12_0 },
5213 [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie,
5214 .min_priv_ver = PRIV_VERSION_1_12_0 },
5215 [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval,
5216 .min_priv_ver = PRIV_VERSION_1_12_0 },
5217 [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst,
5218 .min_priv_ver = PRIV_VERSION_1_12_0 },
5219 [CSR_HGEIP] = { "hgeip", hmode, read_hgeip,
5220 .min_priv_ver = PRIV_VERSION_1_12_0 },
5221 [CSR_HGATP] = { "hgatp", hgatp, read_hgatp, write_hgatp,
5222 .min_priv_ver = PRIV_VERSION_1_12_0 },
5223 [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta,
5224 write_htimedelta,
5225 .min_priv_ver = PRIV_VERSION_1_12_0 },
5226 [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah,
5227 write_htimedeltah,
5228 .min_priv_ver = PRIV_VERSION_1_12_0 },
5229
5230 [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus,
5231 write_vsstatus,
5232 .min_priv_ver = PRIV_VERSION_1_12_0 },
5233 [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip,
5234 .min_priv_ver = PRIV_VERSION_1_12_0 },
5235 [CSR_VSIE] = { "vsie", hmode, NULL, NULL, rmw_vsie,
5236 .min_priv_ver = PRIV_VERSION_1_12_0 },
5237 [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec,
5238 .min_priv_ver = PRIV_VERSION_1_12_0 },
5239 [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch,
5240 write_vsscratch,
5241 .min_priv_ver = PRIV_VERSION_1_12_0 },
5242 [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc,
5243 .min_priv_ver = PRIV_VERSION_1_12_0 },
5244 [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause,
5245 .min_priv_ver = PRIV_VERSION_1_12_0 },
5246 [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval,
5247 .min_priv_ver = PRIV_VERSION_1_12_0 },
5248 [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp,
5249 .min_priv_ver = PRIV_VERSION_1_12_0 },
5250
5251 [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2,
5252 .min_priv_ver = PRIV_VERSION_1_12_0 },
5253 [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst,
5254 .min_priv_ver = PRIV_VERSION_1_12_0 },
5255
5256 /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
5257 [CSR_HVIEN] = { "hvien", aia_hmode, NULL, NULL, rmw_hvien },
5258 [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl,
5259 write_hvictl },
5260 [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1,
5261 write_hviprio1 },
5262 [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2,
5263 write_hviprio2 },
5264 /*
5265 * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
5266 */
5267 [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL,
5268 rmw_xiselect },
5269 [CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg },
5270
5271 /* VS-Level Interrupts (H-extension with AIA) */
5272 [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
5273 [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi },
5274
5275 /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
5276 [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL,
5277 rmw_hidelegh },
5278 [CSR_HVIENH] = { "hvienh", aia_hmode32, NULL, NULL, rmw_hvienh },
5279 [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph },
5280 [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h,
5281 write_hviprio1h },
5282 [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h,
5283 write_hviprio2h },
5284 [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh },
5285 [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph },
5286
5287 /* Physical Memory Protection */
5288 [CSR_MSECCFG] = { "mseccfg", have_mseccfg, read_mseccfg, write_mseccfg,
5289 .min_priv_ver = PRIV_VERSION_1_11_0 },
5290 [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg },
5291 [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg },
5292 [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg },
5293 [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg },
5294 [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr },
5295 [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr },
5296 [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr },
5297 [CSR_PMPADDR3] = { "pmpaddr3", pmp, read_pmpaddr, write_pmpaddr },
5298 [CSR_PMPADDR4] = { "pmpaddr4", pmp, read_pmpaddr, write_pmpaddr },
5299 [CSR_PMPADDR5] = { "pmpaddr5", pmp, read_pmpaddr, write_pmpaddr },
5300 [CSR_PMPADDR6] = { "pmpaddr6", pmp, read_pmpaddr, write_pmpaddr },
5301 [CSR_PMPADDR7] = { "pmpaddr7", pmp, read_pmpaddr, write_pmpaddr },
5302 [CSR_PMPADDR8] = { "pmpaddr8", pmp, read_pmpaddr, write_pmpaddr },
5303 [CSR_PMPADDR9] = { "pmpaddr9", pmp, read_pmpaddr, write_pmpaddr },
5304 [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr },
5305 [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
5306 [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
5307 [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
5308 [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
5309 [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
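
    /*
     * pmpcfg0..3 together with pmpaddr0..15 describe 16 PMP regions.
     * On RV64 only the even-numbered pmpcfg registers exist; that
     * restriction is presumably enforced by the pmpcfg read/write
     * helpers rather than by this table.
     */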

    /* Debug CSRs */
    [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect },
    [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata },
    [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata },
    [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata },
    [CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },
    [CSR_MCONTEXT] = { "mcontext", debug, read_mcontext, write_mcontext },

    /* User Pointer Masking */
    [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte },
    [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask,
        write_upmmask },
    [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase,
        write_upmbase },
    /* Machine Pointer Masking */
    [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte },
    [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask,
        write_mpmmask },
    [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase,
        write_mpmbase },
    /* Supervisor Pointer Masking */
    [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte },
    [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask,
        write_spmmask },
    [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase,
        write_spmbase },

    /* Performance Counters */
    [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_hpmcounter },
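
    /*
     * hpmcounter3..31 are the unprivileged, read-only views of the
     * programmable counters; the ctr predicate gates access on the
     * mcounteren/scounteren/hcounteren enable bits.  The machine-level
     * mhpmcounter3..31 below are the writable counterparts.
     */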

    [CSR_MHPMCOUNTER3] = { "mhpmcounter3", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER4] = { "mhpmcounter4", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER5] = { "mhpmcounter5", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER6] = { "mhpmcounter6", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER7] = { "mhpmcounter7", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER8] = { "mhpmcounter8", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER9] = { "mhpmcounter9", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER10] = { "mhpmcounter10", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER11] = { "mhpmcounter11", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER12] = { "mhpmcounter12", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER13] = { "mhpmcounter13", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER14] = { "mhpmcounter14", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER15] = { "mhpmcounter15", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER16] = { "mhpmcounter16", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER17] = { "mhpmcounter17", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER18] = { "mhpmcounter18", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER19] = { "mhpmcounter19", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER20] = { "mhpmcounter20", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER21] = { "mhpmcounter21", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER22] = { "mhpmcounter22", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER23] = { "mhpmcounter23", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER24] = { "mhpmcounter24", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER25] = { "mhpmcounter25", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER26] = { "mhpmcounter26", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER27] = { "mhpmcounter27", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER28] = { "mhpmcounter28", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER29] = { "mhpmcounter29", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER30] = { "mhpmcounter30", mctr, read_hpmcounter,
        write_mhpmcounter },
    [CSR_MHPMCOUNTER31] = { "mhpmcounter31", mctr, read_hpmcounter,
        write_mhpmcounter },

    [CSR_MCOUNTINHIBIT] = { "mcountinhibit", any, read_mcountinhibit,
        write_mcountinhibit,
        .min_priv_ver = PRIV_VERSION_1_11_0 },

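    /*
     * Smcntrpmf: privilege-mode filtering for the cycle and instret
     * counters.  mcyclecfg/minstretcfg (and their RV32 high halves
     * further down) select which privilege modes the counters count in.
     */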
    [CSR_MCYCLECFG] = { "mcyclecfg", smcntrpmf, read_mcyclecfg,
        write_mcyclecfg,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MINSTRETCFG] = { "minstretcfg", smcntrpmf, read_minstretcfg,
        write_minstretcfg,
        .min_priv_ver = PRIV_VERSION_1_12_0 },

    [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_mhpmevent,
        write_mhpmevent },
    [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent,
        write_mhpmevent },
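
    /*
     * The mhpmevent3..31 selectors use the "any" predicate since they are
     * part of the base machine-mode counter architecture; only their RV32
     * high halves below are conditional on Sscofpmf (sscofpmf_32).
     */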

    [CSR_MCYCLECFGH] = { "mcyclecfgh", smcntrpmf_32, read_mcyclecfgh,
        write_mcyclecfgh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MINSTRETCFGH] = { "minstretcfgh", smcntrpmf_32, read_minstretcfgh,
        write_minstretcfgh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },

    [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf_32, read_mhpmeventh,
        write_mhpmeventh,
        .min_priv_ver = PRIV_VERSION_1_12_0 },

    [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_hpmcounterh },

    [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", mctr32, read_hpmcounterh,
        write_mhpmcounterh },
    [CSR_SCOUNTOVF] = { "scountovf", sscofpmf, read_scountovf,
        .min_priv_ver = PRIV_VERSION_1_12_0 },

#endif /* !CONFIG_USER_ONLY */
};
