1 /*
2 * RISC-V Control and Status Registers.
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/timer.h"
23 #include "cpu.h"
24 #include "tcg/tcg-cpu.h"
25 #include "pmu.h"
26 #include "time_helper.h"
27 #include "exec/exec-all.h"
28 #include "exec/tb-flush.h"
29 #include "sysemu/cpu-timers.h"
30 #include "qemu/guest-random.h"
31 #include "qapi/error.h"
32
33
34 /* CSR function table public API */
/* Copy out the operations registered for @csrno (public API). */
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
{
    int idx = csrno & (CSR_TABLE_SIZE - 1);

    *ops = csr_ops[idx];
}
39
/* Install new operations for @csrno (public API). */
void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
{
    int idx = csrno & (CSR_TABLE_SIZE - 1);

    csr_ops[idx] = *ops;
}
44
45 /* Predicates */
46 #if !defined(CONFIG_USER_ONLY)
/*
 * Check whether a Smstateen-gated CSR/feature may be accessed at the
 * current privilege level.  @index selects the *stateen register set,
 * @bit the feature bit inside it.  Returns RISCV_EXCP_NONE on success,
 * otherwise the exception to raise.
 */
RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit)
{
    bool virt = env->virt_enabled;

    /* M-mode is never blocked; without Smstateen there is nothing to check. */
    if (env->priv == PRV_M || !riscv_cpu_cfg(env)->ext_smstateen) {
        return RISCV_EXCP_NONE;
    }

    /* mstateen gates every privilege level below M. */
    if (!(env->mstateen[index] & bit)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (virt) {
        /* hstateen gates VS/VU; denial is a virtual instruction fault. */
        if (!(env->hstateen[index] & bit)) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }

        /* VU-mode additionally goes through sstateen. */
        if (env->priv == PRV_U && !(env->sstateen[index] & bit)) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    /* Non-virtual U-mode (with S implemented) is gated by sstateen. */
    if (env->priv == PRV_U && riscv_has_ext(env, RVS)) {
        if (!(env->sstateen[index] & bit)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }

    return RISCV_EXCP_NONE;
}
77 #endif
78
/* Predicate for the floating-point CSRs (fflags/frm/fcsr). */
static RISCVException fs(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
    /* Zfinx allows FP CSR access even when the FP unit is disabled. */
    if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
        !riscv_cpu_cfg(env)->ext_zfinx) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
        /* Zfinx path: still subject to the FCSR state-enable bit. */
        return smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR);
    }
#endif
    return RISCV_EXCP_NONE;
}
93
vs(CPURISCVState * env,int csrno)94 static RISCVException vs(CPURISCVState *env, int csrno)
95 {
96 if (riscv_cpu_cfg(env)->ext_zve32f) {
97 #if !defined(CONFIG_USER_ONLY)
98 if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
99 return RISCV_EXCP_ILLEGAL_INST;
100 }
101 #endif
102 return RISCV_EXCP_NONE;
103 }
104 return RISCV_EXCP_ILLEGAL_INST;
105 }
106
ctr(CPURISCVState * env,int csrno)107 static RISCVException ctr(CPURISCVState *env, int csrno)
108 {
109 #if !defined(CONFIG_USER_ONLY)
110 RISCVCPU *cpu = env_archcpu(env);
111 int ctr_index;
112 target_ulong ctr_mask;
113 int base_csrno = CSR_CYCLE;
114 bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;
115
116 if (rv32 && csrno >= CSR_CYCLEH) {
117 /* Offset for RV32 hpmcounternh counters */
118 base_csrno += 0x80;
119 }
120 ctr_index = csrno - base_csrno;
121 ctr_mask = BIT(ctr_index);
122
123 if ((csrno >= CSR_CYCLE && csrno <= CSR_INSTRET) ||
124 (csrno >= CSR_CYCLEH && csrno <= CSR_INSTRETH)) {
125 if (!riscv_cpu_cfg(env)->ext_zicntr) {
126 return RISCV_EXCP_ILLEGAL_INST;
127 }
128
129 goto skip_ext_pmu_check;
130 }
131
132 if (!(cpu->pmu_avail_ctrs & ctr_mask)) {
133 /* No counter is enabled in PMU or the counter is out of range */
134 return RISCV_EXCP_ILLEGAL_INST;
135 }
136
137 skip_ext_pmu_check:
138
139 if (env->debugger) {
140 return RISCV_EXCP_NONE;
141 }
142
143 if (env->priv < PRV_M && !get_field(env->mcounteren, ctr_mask)) {
144 return RISCV_EXCP_ILLEGAL_INST;
145 }
146
147 if (env->virt_enabled) {
148 if (!get_field(env->hcounteren, ctr_mask) ||
149 (env->priv == PRV_U && !get_field(env->scounteren, ctr_mask))) {
150 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
151 }
152 }
153
154 if (riscv_has_ext(env, RVS) && env->priv == PRV_U &&
155 !get_field(env->scounteren, ctr_mask)) {
156 return RISCV_EXCP_ILLEGAL_INST;
157 }
158
159 #endif
160 return RISCV_EXCP_NONE;
161 }
162
ctr32(CPURISCVState * env,int csrno)163 static RISCVException ctr32(CPURISCVState *env, int csrno)
164 {
165 if (riscv_cpu_mxl(env) != MXL_RV32) {
166 return RISCV_EXCP_ILLEGAL_INST;
167 }
168
169 return ctr(env, csrno);
170 }
171
zcmt(CPURISCVState * env,int csrno)172 static RISCVException zcmt(CPURISCVState *env, int csrno)
173 {
174 if (!riscv_cpu_cfg(env)->ext_zcmt) {
175 return RISCV_EXCP_ILLEGAL_INST;
176 }
177
178 #if !defined(CONFIG_USER_ONLY)
179 RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_JVT);
180 if (ret != RISCV_EXCP_NONE) {
181 return ret;
182 }
183 #endif
184
185 return RISCV_EXCP_NONE;
186 }
187
188 #if !defined(CONFIG_USER_ONLY)
/* Predicate for the machine hpm counter CSRs (mhpmcounter3..31 and [h]). */
static RISCVException mctr(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);
    uint32_t pmu_avail_ctrs = cpu->pmu_avail_ctrs;
    int ctr_index;
    int base_csrno = CSR_MHPMCOUNTER3;

    if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) {
        /* Offset for RV32 mhpmcounternh counters */
        base_csrno += 0x80;
    }
    ctr_index = csrno - base_csrno;
    /*
     * pmu_avail_ctrs bit 3 corresponds to ctr_index 0 (counter 3), hence
     * the >> 3 before masking.
     */
    if ((BIT(ctr_index) & pmu_avail_ctrs >> 3) == 0) {
        /* The PMU is not enabled or counter is out of range */
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}
208
mctr32(CPURISCVState * env,int csrno)209 static RISCVException mctr32(CPURISCVState *env, int csrno)
210 {
211 if (riscv_cpu_mxl(env) != MXL_RV32) {
212 return RISCV_EXCP_ILLEGAL_INST;
213 }
214
215 return mctr(env, csrno);
216 }
217
sscofpmf(CPURISCVState * env,int csrno)218 static RISCVException sscofpmf(CPURISCVState *env, int csrno)
219 {
220 if (!riscv_cpu_cfg(env)->ext_sscofpmf) {
221 return RISCV_EXCP_ILLEGAL_INST;
222 }
223
224 return RISCV_EXCP_NONE;
225 }
226
any(CPURISCVState * env,int csrno)227 static RISCVException any(CPURISCVState *env, int csrno)
228 {
229 return RISCV_EXCP_NONE;
230 }
231
any32(CPURISCVState * env,int csrno)232 static RISCVException any32(CPURISCVState *env, int csrno)
233 {
234 if (riscv_cpu_mxl(env) != MXL_RV32) {
235 return RISCV_EXCP_ILLEGAL_INST;
236 }
237
238 return any(env, csrno);
239
240 }
241
/* AIA machine-level CSRs require Smaia. */
static int aia_any(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_cfg(env)->ext_smaia) {
        return any(env, csrno);
    }

    return RISCV_EXCP_ILLEGAL_INST;
}
250
/* AIA machine-level RV32-only CSRs require Smaia. */
static int aia_any32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_cfg(env)->ext_smaia) {
        return any32(env, csrno);
    }

    return RISCV_EXCP_ILLEGAL_INST;
}
259
smode(CPURISCVState * env,int csrno)260 static RISCVException smode(CPURISCVState *env, int csrno)
261 {
262 if (riscv_has_ext(env, RVS)) {
263 return RISCV_EXCP_NONE;
264 }
265
266 return RISCV_EXCP_ILLEGAL_INST;
267 }
268
/* Supervisor-level RV32-only CSRs. */
static int smode32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        return smode(env, csrno);
    }

    return RISCV_EXCP_ILLEGAL_INST;
}
277
/* AIA supervisor-level CSRs require Ssaia. */
static int aia_smode(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_cfg(env)->ext_ssaia) {
        return smode(env, csrno);
    }

    return RISCV_EXCP_ILLEGAL_INST;
}
286
/* AIA supervisor-level RV32-only CSRs require Ssaia. */
static int aia_smode32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_cfg(env)->ext_ssaia) {
        return smode32(env, csrno);
    }

    return RISCV_EXCP_ILLEGAL_INST;
}
295
hmode(CPURISCVState * env,int csrno)296 static RISCVException hmode(CPURISCVState *env, int csrno)
297 {
298 if (riscv_has_ext(env, RVH)) {
299 return RISCV_EXCP_NONE;
300 }
301
302 return RISCV_EXCP_ILLEGAL_INST;
303 }
304
hmode32(CPURISCVState * env,int csrno)305 static RISCVException hmode32(CPURISCVState *env, int csrno)
306 {
307 if (riscv_cpu_mxl(env) != MXL_RV32) {
308 return RISCV_EXCP_ILLEGAL_INST;
309 }
310
311 return hmode(env, csrno);
312
313 }
314
umode(CPURISCVState * env,int csrno)315 static RISCVException umode(CPURISCVState *env, int csrno)
316 {
317 if (riscv_has_ext(env, RVU)) {
318 return RISCV_EXCP_NONE;
319 }
320
321 return RISCV_EXCP_ILLEGAL_INST;
322 }
323
umode32(CPURISCVState * env,int csrno)324 static RISCVException umode32(CPURISCVState *env, int csrno)
325 {
326 if (riscv_cpu_mxl(env) != MXL_RV32) {
327 return RISCV_EXCP_ILLEGAL_INST;
328 }
329
330 return umode(env, csrno);
331 }
332
mstateen(CPURISCVState * env,int csrno)333 static RISCVException mstateen(CPURISCVState *env, int csrno)
334 {
335 if (!riscv_cpu_cfg(env)->ext_smstateen) {
336 return RISCV_EXCP_ILLEGAL_INST;
337 }
338
339 return any(env, csrno);
340 }
341
/*
 * Common predicate for the hstateen* CSRs: requires Smstateen and the H
 * extension, and below M-mode also the SE bit of the matching mstateen
 * register.  @base is the first CSR number of the group, so csrno - base
 * gives the register index.
 */
static RISCVException hstateen_pred(CPURISCVState *env, int csrno, int base)
{
    if (!riscv_cpu_cfg(env)->ext_smstateen) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    RISCVException ret = hmode(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* The debugger may always access the register. */
    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    if (env->priv < PRV_M) {
        if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEEN)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }

    return RISCV_EXCP_NONE;
}
365
hstateen(CPURISCVState * env,int csrno)366 static RISCVException hstateen(CPURISCVState *env, int csrno)
367 {
368 return hstateen_pred(env, csrno, CSR_HSTATEEN0);
369 }
370
hstateenh(CPURISCVState * env,int csrno)371 static RISCVException hstateenh(CPURISCVState *env, int csrno)
372 {
373 return hstateen_pred(env, csrno, CSR_HSTATEEN0H);
374 }
375
/* Predicate for the sstateen* CSRs (supervisor view of the state-enables). */
static RISCVException sstateen(CPURISCVState *env, int csrno)
{
    bool virt = env->virt_enabled;
    int index = csrno - CSR_SSTATEEN0;

    if (!riscv_cpu_cfg(env)->ext_smstateen) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    RISCVException ret = smode(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* The debugger may always access the register. */
    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    if (env->priv < PRV_M) {
        /* The SE bit of mstateen gates all lower-privilege access. */
        if (!(env->mstateen[index] & SMSTATEEN_STATEEN)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }

        if (virt) {
            /* VS-mode access is additionally gated by hstateen. */
            if (!(env->hstateen[index] & SMSTATEEN_STATEEN)) {
                return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
            }
        }
    }

    return RISCV_EXCP_NONE;
}
408
/*
 * Predicate for the Sstc stimecmp/vstimecmp CSRs: the extension and a
 * platform timer callback must both be present; access below M-mode is
 * gated by the TM counter-enable bits and the STCE envcfg bits.
 */
static RISCVException sstc(CPURISCVState *env, int csrno)
{
    bool hmode_check = false;

    if (!riscv_cpu_cfg(env)->ext_sstc || !env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* vstimecmp[h] are hypervisor CSRs; stimecmp[h] are supervisor CSRs. */
    if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) {
        hmode_check = true;
    }

    RISCVException ret = hmode_check ? hmode(env, csrno) : smode(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* The debugger may always access the register. */
    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    }

    /*
     * No need of separate function for rv32 as menvcfg stores both menvcfg
     * menvcfgh for RV32.
     */
    if (!(get_field(env->mcounteren, COUNTEREN_TM) &&
          get_field(env->menvcfg, MENVCFG_STCE))) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (env->virt_enabled) {
        /* Guest access is further gated by the hypervisor's controls. */
        if (!(get_field(env->hcounteren, COUNTEREN_TM) &&
              get_field(env->henvcfg, HENVCFG_STCE))) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    return RISCV_EXCP_NONE;
}
452
sstc_32(CPURISCVState * env,int csrno)453 static RISCVException sstc_32(CPURISCVState *env, int csrno)
454 {
455 if (riscv_cpu_mxl(env) != MXL_RV32) {
456 return RISCV_EXCP_ILLEGAL_INST;
457 }
458
459 return sstc(env, csrno);
460 }
461
satp(CPURISCVState * env,int csrno)462 static RISCVException satp(CPURISCVState *env, int csrno)
463 {
464 if (env->priv == PRV_S && !env->virt_enabled &&
465 get_field(env->mstatus, MSTATUS_TVM)) {
466 return RISCV_EXCP_ILLEGAL_INST;
467 }
468 if (env->priv == PRV_S && env->virt_enabled &&
469 get_field(env->hstatus, HSTATUS_VTVM)) {
470 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
471 }
472
473 return smode(env, csrno);
474 }
475
hgatp(CPURISCVState * env,int csrno)476 static RISCVException hgatp(CPURISCVState *env, int csrno)
477 {
478 if (env->priv == PRV_S && !env->virt_enabled &&
479 get_field(env->mstatus, MSTATUS_TVM)) {
480 return RISCV_EXCP_ILLEGAL_INST;
481 }
482
483 return hmode(env, csrno);
484 }
485
486 /* Checks if PointerMasking registers could be accessed */
pointer_masking(CPURISCVState * env,int csrno)487 static RISCVException pointer_masking(CPURISCVState *env, int csrno)
488 {
489 /* Check if j-ext is present */
490 if (riscv_has_ext(env, RVJ)) {
491 return RISCV_EXCP_NONE;
492 }
493 return RISCV_EXCP_ILLEGAL_INST;
494 }
495
/* AIA hypervisor-level CSRs require Ssaia. */
static int aia_hmode(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_cfg(env)->ext_ssaia) {
        return hmode(env, csrno);
    }

    return RISCV_EXCP_ILLEGAL_INST;
}
504
/* AIA hypervisor-level RV32-only CSRs require Ssaia. */
static int aia_hmode32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_cfg(env)->ext_ssaia) {
        return hmode32(env, csrno);
    }

    return RISCV_EXCP_ILLEGAL_INST;
}
513
pmp(CPURISCVState * env,int csrno)514 static RISCVException pmp(CPURISCVState *env, int csrno)
515 {
516 if (riscv_cpu_cfg(env)->pmp) {
517 if (csrno <= CSR_PMPCFG3) {
518 uint32_t reg_index = csrno - CSR_PMPCFG0;
519
520 /* TODO: RV128 restriction check */
521 if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
522 return RISCV_EXCP_ILLEGAL_INST;
523 }
524 }
525
526 return RISCV_EXCP_NONE;
527 }
528
529 return RISCV_EXCP_ILLEGAL_INST;
530 }
531
have_mseccfg(CPURISCVState * env,int csrno)532 static RISCVException have_mseccfg(CPURISCVState *env, int csrno)
533 {
534 if (riscv_cpu_cfg(env)->ext_smepmp) {
535 return RISCV_EXCP_NONE;
536 }
537 if (riscv_cpu_cfg(env)->ext_zkr) {
538 return RISCV_EXCP_NONE;
539 }
540
541 return RISCV_EXCP_ILLEGAL_INST;
542 }
543
debug(CPURISCVState * env,int csrno)544 static RISCVException debug(CPURISCVState *env, int csrno)
545 {
546 if (riscv_cpu_cfg(env)->debug) {
547 return RISCV_EXCP_NONE;
548 }
549
550 return RISCV_EXCP_ILLEGAL_INST;
551 }
552 #endif
553
/* Predicate for the Zkr entropy-source seed CSR. */
static RISCVException seed(CPURISCVState *env, int csrno)
{
    if (!riscv_cpu_cfg(env)->ext_zkr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

#if !defined(CONFIG_USER_ONLY)
    /* The debugger may always access the register. */
    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    /*
     * With a CSR read-write instruction:
     * 1) The seed CSR is always available in machine mode as normal.
     * 2) Attempted access to seed from virtual modes VS and VU always raises
     * an exception(virtual instruction exception only if mseccfg.sseed=1).
     * 3) Without the corresponding access control bit set to 1, any attempted
     * access to seed from U, S or HS modes will raise an illegal instruction
     * exception.
     */
    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    } else if (env->virt_enabled) {
        if (env->mseccfg & MSECCFG_SSEED) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        } else {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    } else {
        if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) {
            return RISCV_EXCP_NONE;
        } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) {
            return RISCV_EXCP_NONE;
        } else {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }
#else
    return RISCV_EXCP_NONE;
#endif
}
595
596 /* User Floating-Point CSRs */
read_fflags(CPURISCVState * env,int csrno,target_ulong * val)597 static RISCVException read_fflags(CPURISCVState *env, int csrno,
598 target_ulong *val)
599 {
600 *val = riscv_cpu_get_fflags(env);
601 return RISCV_EXCP_NONE;
602 }
603
write_fflags(CPURISCVState * env,int csrno,target_ulong val)604 static RISCVException write_fflags(CPURISCVState *env, int csrno,
605 target_ulong val)
606 {
607 #if !defined(CONFIG_USER_ONLY)
608 if (riscv_has_ext(env, RVF)) {
609 env->mstatus |= MSTATUS_FS;
610 }
611 #endif
612 riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
613 return RISCV_EXCP_NONE;
614 }
615
read_frm(CPURISCVState * env,int csrno,target_ulong * val)616 static RISCVException read_frm(CPURISCVState *env, int csrno,
617 target_ulong *val)
618 {
619 *val = env->frm;
620 return RISCV_EXCP_NONE;
621 }
622
write_frm(CPURISCVState * env,int csrno,target_ulong val)623 static RISCVException write_frm(CPURISCVState *env, int csrno,
624 target_ulong val)
625 {
626 #if !defined(CONFIG_USER_ONLY)
627 if (riscv_has_ext(env, RVF)) {
628 env->mstatus |= MSTATUS_FS;
629 }
630 #endif
631 env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
632 return RISCV_EXCP_NONE;
633 }
634
read_fcsr(CPURISCVState * env,int csrno,target_ulong * val)635 static RISCVException read_fcsr(CPURISCVState *env, int csrno,
636 target_ulong *val)
637 {
638 *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
639 | (env->frm << FSR_RD_SHIFT);
640 return RISCV_EXCP_NONE;
641 }
642
write_fcsr(CPURISCVState * env,int csrno,target_ulong val)643 static RISCVException write_fcsr(CPURISCVState *env, int csrno,
644 target_ulong val)
645 {
646 #if !defined(CONFIG_USER_ONLY)
647 if (riscv_has_ext(env, RVF)) {
648 env->mstatus |= MSTATUS_FS;
649 }
650 #endif
651 env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
652 riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
653 return RISCV_EXCP_NONE;
654 }
655
read_vtype(CPURISCVState * env,int csrno,target_ulong * val)656 static RISCVException read_vtype(CPURISCVState *env, int csrno,
657 target_ulong *val)
658 {
659 uint64_t vill;
660 switch (env->xl) {
661 case MXL_RV32:
662 vill = (uint32_t)env->vill << 31;
663 break;
664 case MXL_RV64:
665 vill = (uint64_t)env->vill << 63;
666 break;
667 default:
668 g_assert_not_reached();
669 }
670 *val = (target_ulong)vill | env->vtype;
671 return RISCV_EXCP_NONE;
672 }
673
read_vl(CPURISCVState * env,int csrno,target_ulong * val)674 static RISCVException read_vl(CPURISCVState *env, int csrno,
675 target_ulong *val)
676 {
677 *val = env->vl;
678 return RISCV_EXCP_NONE;
679 }
680
/* vlenb: vector register length in bytes (VLEN / 8). */
static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->vlen >> 3;

    return RISCV_EXCP_NONE;
}
686
read_vxrm(CPURISCVState * env,int csrno,target_ulong * val)687 static RISCVException read_vxrm(CPURISCVState *env, int csrno,
688 target_ulong *val)
689 {
690 *val = env->vxrm;
691 return RISCV_EXCP_NONE;
692 }
693
write_vxrm(CPURISCVState * env,int csrno,target_ulong val)694 static RISCVException write_vxrm(CPURISCVState *env, int csrno,
695 target_ulong val)
696 {
697 #if !defined(CONFIG_USER_ONLY)
698 env->mstatus |= MSTATUS_VS;
699 #endif
700 env->vxrm = val;
701 return RISCV_EXCP_NONE;
702 }
703
read_vxsat(CPURISCVState * env,int csrno,target_ulong * val)704 static RISCVException read_vxsat(CPURISCVState *env, int csrno,
705 target_ulong *val)
706 {
707 *val = env->vxsat;
708 return RISCV_EXCP_NONE;
709 }
710
write_vxsat(CPURISCVState * env,int csrno,target_ulong val)711 static RISCVException write_vxsat(CPURISCVState *env, int csrno,
712 target_ulong val)
713 {
714 #if !defined(CONFIG_USER_ONLY)
715 env->mstatus |= MSTATUS_VS;
716 #endif
717 env->vxsat = val;
718 return RISCV_EXCP_NONE;
719 }
720
read_vstart(CPURISCVState * env,int csrno,target_ulong * val)721 static RISCVException read_vstart(CPURISCVState *env, int csrno,
722 target_ulong *val)
723 {
724 *val = env->vstart;
725 return RISCV_EXCP_NONE;
726 }
727
/* Set vstart, masking to the architecturally writable bits. */
static RISCVException write_vstart(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    /* Any vector CSR write dirties the vector state. */
    env->mstatus |= MSTATUS_VS;
#endif
    /*
     * The vstart CSR is defined to have only enough writable bits
     * to hold the largest element index, i.e. lg2(VLEN) bits.
     */
    env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlen));
    return RISCV_EXCP_NONE;
}
741
/* vcsr: vxrm and vxsat packed into one register. */
static int read_vcsr(CPURISCVState *env, int csrno, target_ulong *val)
{
    target_ulong rm = env->vxrm << VCSR_VXRM_SHIFT;
    target_ulong sat = env->vxsat << VCSR_VXSAT_SHIFT;

    *val = rm | sat;

    return RISCV_EXCP_NONE;
}
747
/* Split a vcsr write into its vxrm and vxsat fields. */
static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
    env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;

    return RISCV_EXCP_NONE;
}
757
758 /* User Timers and Counters */
get_ticks(bool shift)759 static target_ulong get_ticks(bool shift)
760 {
761 int64_t val;
762 target_ulong result;
763
764 #if !defined(CONFIG_USER_ONLY)
765 if (icount_enabled()) {
766 val = icount_get();
767 } else {
768 val = cpu_get_host_ticks();
769 }
770 #else
771 val = cpu_get_host_ticks();
772 #endif
773
774 if (shift) {
775 result = val >> 32;
776 } else {
777 result = val;
778 }
779
780 return result;
781 }
782
783 #if defined(CONFIG_USER_ONLY)
read_time(CPURISCVState * env,int csrno,target_ulong * val)784 static RISCVException read_time(CPURISCVState *env, int csrno,
785 target_ulong *val)
786 {
787 *val = cpu_get_host_ticks();
788 return RISCV_EXCP_NONE;
789 }
790
read_timeh(CPURISCVState * env,int csrno,target_ulong * val)791 static RISCVException read_timeh(CPURISCVState *env, int csrno,
792 target_ulong *val)
793 {
794 *val = cpu_get_host_ticks() >> 32;
795 return RISCV_EXCP_NONE;
796 }
797
/* User-only build: every hpm counter reads as the tick count. */
static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = get_ticks(false);

    return RISCV_EXCP_NONE;
}
803
/* User-only build: high half of the tick count. */
static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = get_ticks(true);

    return RISCV_EXCP_NONE;
}
809
810 #else /* CONFIG_USER_ONLY */
811
/* mhpmeventN: the array index is relative to CSR_MCOUNTINHIBIT. */
static int read_mhpmevent(CPURISCVState *env, int csrno, target_ulong *val)
{
    int idx = csrno - CSR_MCOUNTINHIBIT;

    *val = env->mhpmevent_val[idx];

    return RISCV_EXCP_NONE;
}
820
/*
 * Write mhpmeventN.  On RV32 the 64-bit event value is reassembled from
 * this low half and the previously written mhpmeventNh before the PMU
 * event map is updated.
 */
static int write_mhpmevent(CPURISCVState *env, int csrno, target_ulong val)
{
    int evt_index = csrno - CSR_MCOUNTINHIBIT;
    uint64_t mhpmevt_val = val;

    env->mhpmevent_val[evt_index] = val;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpmevt_val = mhpmevt_val |
                      ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
    }
    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}
836
/* mhpmeventNh (RV32): events start at index 3, hence the +3. */
static int read_mhpmeventh(CPURISCVState *env, int csrno, target_ulong *val)
{
    int idx = csrno - CSR_MHPMEVENT3H + 3;

    *val = env->mhpmeventh_val[idx];

    return RISCV_EXCP_NONE;
}
845
/*
 * Write mhpmeventNh (RV32 only).  The full 64-bit event value is built
 * from the stored low half plus this new high half, then the PMU event
 * map is updated.
 */
static int write_mhpmeventh(CPURISCVState *env, int csrno, target_ulong val)
{
    int evt_index = csrno - CSR_MHPMEVENT3H + 3;
    uint64_t mhpmevth_val = val;
    uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];

    mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
    env->mhpmeventh_val[evt_index] = val;

    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}
859
/*
 * Write a machine counter (mcycle/minstret/mhpmcounterN).  For counters
 * that monitor cycles or instructions, a snapshot of the current tick
 * value is saved so later reads can compute the delta; hpm counters
 * (index > 2) also hand the full 64-bit value to riscv_pmu_setup_timer().
 */
static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val)
{
    int ctr_idx = csrno - CSR_MCYCLE;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = val;

    counter->mhpmcounter_val = val;
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        counter->mhpmcounter_prev = get_ticks(false);
        if (ctr_idx > 2) {
            /* On RV32 fold in the previously written high half. */
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                mhpmctr_val = mhpmctr_val |
                              ((uint64_t)counter->mhpmcounterh_val << 32);
            }
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        /* Other counters can keep incrementing from the given value */
        counter->mhpmcounter_prev = val;
    }

    return RISCV_EXCP_NONE;
}
884
/*
 * Write the high half of a machine counter (RV32).  Mirrors
 * write_mhpmcounter(): snapshot the upper tick half for tick-backed
 * counters and re-arm the PMU timer for hpm counters (index > 2).
 */
static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val)
{
    int ctr_idx = csrno - CSR_MCYCLEH;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = counter->mhpmcounter_val;
    uint64_t mhpmctrh_val = val;

    counter->mhpmcounterh_val = val;
    /* Combine the stored low half with the new high half. */
    mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        counter->mhpmcounterh_prev = get_ticks(true);
        if (ctr_idx > 2) {
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        /* Non-tick counters keep incrementing from the written value. */
        counter->mhpmcounterh_prev = val;
    }

    return RISCV_EXCP_NONE;
}
906
/*
 * Read one half of a PMU counter.  Tick-backed counters return
 * current_ticks - prev_snapshot + written_value; all others return the
 * last written value.  Counters inhibited via mcountinhibit return the
 * stored value and are marked stopped.
 */
static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
                                         bool upper_half, uint32_t ctr_idx)
{
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    target_ulong ctr_prev = upper_half ? counter->mhpmcounterh_prev :
                                         counter->mhpmcounter_prev;
    target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val :
                                        counter->mhpmcounter_val;

    if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
        /*
         * Counter should not increment if inhibit bit is set. We can't really
         * stop the icount counting. Just return the counter value written by
         * the supervisor to indicate that counter was not incremented.
         */
        if (!counter->started) {
            *val = ctr_val;
            return RISCV_EXCP_NONE;
        } else {
            /* Mark that the counter has been stopped */
            counter->started = false;
        }
    }

    /*
     * The kernel computes the perf delta by subtracting the current value from
     * the value it initialized previously (ctr_val).
     */
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        *val = get_ticks(upper_half) - ctr_prev + ctr_val;
    } else {
        *val = ctr_val;
    }

    return RISCV_EXCP_NONE;
}
944
/* Dispatch a counter read (M-level or unprivileged alias) to the PMU. */
static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
{
    uint16_t idx;

    if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
        idx = csrno - CSR_MCYCLE;
    } else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
        idx = csrno - CSR_CYCLE;
    } else {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return riscv_pmu_read_ctr(env, val, false, idx);
}
959
/* Dispatch a high-half counter read (RV32) to the PMU. */
static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
{
    uint16_t idx;

    if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) {
        idx = csrno - CSR_MCYCLEH;
    } else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) {
        idx = csrno - CSR_CYCLEH;
    } else {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return riscv_pmu_read_ctr(env, val, true, idx);
}
974
/*
 * scountovf: one OF (overflow) bit per hpm counter.  The OF bits live in
 * mhpmevent on RV64 and in mhpmeventh on RV32; visible bits are masked
 * by mcounteren.
 */
static int read_scountovf(CPURISCVState *env, int csrno, target_ulong *val)
{
    int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT;
    int i;
    *val = 0;
    target_ulong *mhpm_evt_val;
    uint64_t of_bit_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpm_evt_val = env->mhpmeventh_val;
        of_bit_mask = MHPMEVENTH_BIT_OF;
    } else {
        mhpm_evt_val = env->mhpmevent_val;
        of_bit_mask = MHPMEVENT_BIT_OF;
    }

    for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) {
        if ((get_field(env->mcounteren, BIT(i))) &&
            (mhpm_evt_val[i] & of_bit_mask)) {
            *val |= BIT(i);
        }
    }

    return RISCV_EXCP_NONE;
}
1000
read_time(CPURISCVState * env,int csrno,target_ulong * val)1001 static RISCVException read_time(CPURISCVState *env, int csrno,
1002 target_ulong *val)
1003 {
1004 uint64_t delta = env->virt_enabled ? env->htimedelta : 0;
1005
1006 if (!env->rdtime_fn) {
1007 return RISCV_EXCP_ILLEGAL_INST;
1008 }
1009
1010 *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
1011 return RISCV_EXCP_NONE;
1012 }
1013
read_timeh(CPURISCVState * env,int csrno,target_ulong * val)1014 static RISCVException read_timeh(CPURISCVState *env, int csrno,
1015 target_ulong *val)
1016 {
1017 uint64_t delta = env->virt_enabled ? env->htimedelta : 0;
1018
1019 if (!env->rdtime_fn) {
1020 return RISCV_EXCP_ILLEGAL_INST;
1021 }
1022
1023 *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
1024 return RISCV_EXCP_NONE;
1025 }
1026
read_vstimecmp(CPURISCVState * env,int csrno,target_ulong * val)1027 static RISCVException read_vstimecmp(CPURISCVState *env, int csrno,
1028 target_ulong *val)
1029 {
1030 *val = env->vstimecmp;
1031
1032 return RISCV_EXCP_NONE;
1033 }
1034
read_vstimecmph(CPURISCVState * env,int csrno,target_ulong * val)1035 static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
1036 target_ulong *val)
1037 {
1038 *val = env->vstimecmp >> 32;
1039
1040 return RISCV_EXCP_NONE;
1041 }
1042
write_vstimecmp(CPURISCVState * env,int csrno,target_ulong val)1043 static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
1044 target_ulong val)
1045 {
1046 if (riscv_cpu_mxl(env) == MXL_RV32) {
1047 env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
1048 } else {
1049 env->vstimecmp = val;
1050 }
1051
1052 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
1053 env->htimedelta, MIP_VSTIP);
1054
1055 return RISCV_EXCP_NONE;
1056 }
1057
write_vstimecmph(CPURISCVState * env,int csrno,target_ulong val)1058 static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
1059 target_ulong val)
1060 {
1061 env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
1062 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
1063 env->htimedelta, MIP_VSTIP);
1064
1065 return RISCV_EXCP_NONE;
1066 }
1067
read_stimecmp(CPURISCVState * env,int csrno,target_ulong * val)1068 static RISCVException read_stimecmp(CPURISCVState *env, int csrno,
1069 target_ulong *val)
1070 {
1071 if (env->virt_enabled) {
1072 *val = env->vstimecmp;
1073 } else {
1074 *val = env->stimecmp;
1075 }
1076
1077 return RISCV_EXCP_NONE;
1078 }
1079
read_stimecmph(CPURISCVState * env,int csrno,target_ulong * val)1080 static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
1081 target_ulong *val)
1082 {
1083 if (env->virt_enabled) {
1084 *val = env->vstimecmp >> 32;
1085 } else {
1086 *val = env->stimecmp >> 32;
1087 }
1088
1089 return RISCV_EXCP_NONE;
1090 }
1091
/*
 * Write stimecmp.  In virtual mode the access is redirected to vstimecmp
 * (or faults when hvictl.VTI forbids timer-CSR writes).  On RV32 only the
 * low 32 bits are replaced.  The S-mode timer is reprogrammed afterwards.
 */
static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    if (env->virt_enabled) {
        if (env->hvictl & HVICTL_VTI) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        return write_vstimecmp(env, csrno, val);
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->stimecmp = deposit64(env->stimecmp, 0, 32, (uint64_t)val);
    } else {
        env->stimecmp = val;
    }

    riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);

    return RISCV_EXCP_NONE;
}
1112
write_stimecmph(CPURISCVState * env,int csrno,target_ulong val)1113 static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
1114 target_ulong val)
1115 {
1116 if (env->virt_enabled) {
1117 if (env->hvictl & HVICTL_VTI) {
1118 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1119 }
1120 return write_vstimecmph(env, csrno, val);
1121 }
1122
1123 env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
1124 riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);
1125
1126 return RISCV_EXCP_NONE;
1127 }
1128
/* Number of candidate sources examined when computing vstopi */
#define VSTOPI_NUM_SRCS 5

/* Interrupt numbers 13..63: the "local" (platform/custom) range */
#define LOCAL_INTERRUPTS (~0x1FFF)

/* Interrupts M-mode may delegate to HS-mode via mideleg */
static const uint64_t delegable_ints =
    S_MODE_INTERRUPTS | VS_MODE_INTERRUPTS | MIP_LCOFIP;
/* Interrupts HS-mode may delegate to VS-mode via hideleg */
static const uint64_t vs_delegable_ints =
    (VS_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & ~MIP_LCOFIP;
/* Every interrupt number this implementation can raise */
static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
                                 HS_MODE_INTERRUPTS | LOCAL_INTERRUPTS;
/* Exceptions M-mode may delegate to HS-mode via medeleg */
#define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
                         (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_U_ECALL)) | \
                         (1ULL << (RISCV_EXCP_S_ECALL)) | \
                         (1ULL << (RISCV_EXCP_VS_ECALL)) | \
                         (1ULL << (RISCV_EXCP_M_ECALL)) | \
                         (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
/*
 * Exceptions HS-mode may delegate onward to VS-mode: everything in
 * DELEGABLE_EXCPS except ecalls from S/VS/M and the guest-page-fault /
 * virtual-instruction causes, which always stop at HS-mode or above.
 */
static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
    ~((1ULL << (RISCV_EXCP_S_ECALL)) |
      (1ULL << (RISCV_EXCP_VS_ECALL)) |
      (1ULL << (RISCV_EXCP_M_ECALL)) |
      (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
      (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
      (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
      (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
/* mstatus fields visible through the sstatus view (priv spec v1.10+) */
static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
    SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
    SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;

/*
 * The spec allows bits 13:63 to be either read-only or writable.
 * So far the only interrupt we have in that region is LCOFIP, which
 * is writable.
 *
 * The spec also allows virtual interrupts to be injected in this
 * region even without any hardware interrupt backing that interrupt
 * number.
 *
 * For now the interrupts in the 13:63 region are all kept writable:
 * 13 is LCOFIP and 14:63 are virtual-only. Change this in the future
 * if we introduce more interrupts that are not writable.
 */

/* Bit STIP can be an alias of mip.STIP that's why it's writable in mvip. */
static const target_ulong mvip_writable_mask = MIP_SSIP | MIP_STIP | MIP_SEIP |
                                    LOCAL_INTERRUPTS;
static const target_ulong mvien_writable_mask = MIP_SSIP | MIP_SEIP |
                                    LOCAL_INTERRUPTS;

static const target_ulong sip_writable_mask = SIP_SSIP | LOCAL_INTERRUPTS;
static const target_ulong hip_writable_mask = MIP_VSSIP;
static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP |
                                    MIP_VSEIP | LOCAL_INTERRUPTS;
static const target_ulong hvien_writable_mask = LOCAL_INTERRUPTS;

static const target_ulong vsip_writable_mask = MIP_VSSIP | LOCAL_INTERRUPTS;

/* satp.MODE encodings legal on RV32 (non-static: shared with other units) */
const bool valid_vm_1_10_32[16] = {
    [VM_1_10_MBARE] = true,
    [VM_1_10_SV32] = true
};

/* satp.MODE encodings legal on RV64 */
const bool valid_vm_1_10_64[16] = {
    [VM_1_10_MBARE] = true,
    [VM_1_10_SV39] = true,
    [VM_1_10_SV48] = true,
    [VM_1_10_SV57] = true
};
1207
1208 /* Machine Information Registers */
/* Generic read handler for CSRs that are hardwired to zero. */
static RISCVException read_zero(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = 0;
    return RISCV_EXCP_NONE;
}

/* Generic write handler for CSRs whose writes are silently discarded. */
static RISCVException write_ignore(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    return RISCV_EXCP_NONE;
}
1221
/* mvendorid: JEDEC vendor id, taken from the CPU configuration. */
static RISCVException read_mvendorid(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->mvendorid;
    return RISCV_EXCP_NONE;
}

/* marchid: microarchitecture id, taken from the CPU configuration. */
static RISCVException read_marchid(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->marchid;
    return RISCV_EXCP_NONE;
}

/* mimpid: implementation id, taken from the CPU configuration. */
static RISCVException read_mimpid(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->mimpid;
    return RISCV_EXCP_NONE;
}

/* mhartid: integer id of this hart within the machine. */
static RISCVException read_mhartid(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mhartid;
    return RISCV_EXCP_NONE;
}
1249
1250 /* Machine Trap Setup */
1251
/*
 * We do not store SD explicitly, only compute it on demand.
 *
 * SD is set when any of the FS/VS/XS fields reads as Dirty (all bits
 * of the field set); the returned value is "status" with the
 * XLEN-appropriate SD bit ORed in.
 *
 * NOTE(review): for MXL_RV128 only MSTATUSH128_SD itself is returned,
 * discarding the other bits of "status" -- read_mstatus_i128 uses the
 * result as the high half of the 128-bit mstatus; confirm this is the
 * intended behavior.
 */
static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
{
    if ((status & MSTATUS_FS) == MSTATUS_FS ||
        (status & MSTATUS_VS) == MSTATUS_VS ||
        (status & MSTATUS_XS) == MSTATUS_XS) {
        switch (xl) {
        case MXL_RV32:
            return status | MSTATUS32_SD;
        case MXL_RV64:
            return status | MSTATUS64_SD;
        case MXL_RV128:
            return MSTATUSH128_SD;
        default:
            g_assert_not_reached();
        }
    }
    return status;
}
1271
/* Read mstatus with the SD summary bit computed on the fly. */
static RISCVException read_mstatus(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
    return RISCV_EXCP_NONE;
}

/* Return true if "vm" is a satp.MODE value this CPU supports. */
static bool validate_vm(CPURISCVState *env, target_ulong vm)
{
    return (vm & 0xf) <=
           satp_mode_max_from_map(riscv_cpu_cfg(env)->satp_mode.map);
}
1284
/*
 * WARL-legalize the MPP field of an incoming mstatus value.
 *
 * M is always a legal target; S and U are legal only when the
 * corresponding extension is present. An illegal encoding keeps the
 * previous MPP value ("old_mpp") instead.
 */
static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp,
                                 target_ulong val)
{
    target_ulong new_mpp = get_field(val, MSTATUS_MPP);
    bool legal = (new_mpp == PRV_M) ||
                 (new_mpp == PRV_S && riscv_has_ext(env, RVS)) ||
                 (new_mpp == PRV_U && riscv_has_ext(env, RVU));

    /* Leave the field unchanged if the new encoding is invalid */
    return legal ? val : set_field(val, MSTATUS_MPP, old_mpp);
}
1310
/*
 * Write mstatus (low half on RV32): legalize MPP, flush the TLB when a
 * translation-affecting bit flips, then merge only the writable bits.
 */
static RISCVException write_mstatus(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus = env->mstatus;
    uint64_t mask = 0;
    RISCVMXL xl = riscv_cpu_mxl(env);

    /*
     * MPP field have been made WARL since priv version 1.11. However,
     * legalization for it will not break any software running on 1.10.
     */
    val = legalize_mpp(env, get_field(mstatus, MSTATUS_MPP), val);

    /* flush tlb on mstatus fields that affect VM */
    if ((val ^ mstatus) & MSTATUS_MXR) {
        tlb_flush(env_cpu(env));
    }
    /* Bits writable on every configuration */
    mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
        MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
        MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
        MSTATUS_TW | MSTATUS_VS;

    /* FS exists only with the F extension */
    if (riscv_has_ext(env, RVF)) {
        mask |= MSTATUS_FS;
    }

    /* 64-bit-only fields (or any width when driven by the debugger) */
    if (xl != MXL_RV32 || env->debugger) {
        if (riscv_has_ext(env, RVH)) {
            mask |= MSTATUS_MPV | MSTATUS_GVA;
        }
        /* UXL is WARL: accept the write only if a non-zero value is given */
        if ((val & MSTATUS64_UXL) != 0) {
            mask |= MSTATUS64_UXL;
        }
    }

    mstatus = (mstatus & ~mask) | (val & mask);

    env->mstatus = mstatus;

    /*
     * Except in debug mode, UXL/SXL can only be modified by higher
     * privilege mode. So xl will not be changed in normal mode.
     */
    if (env->debugger) {
        env->xl = cpu_recompute_xl(env);
    }

    riscv_cpu_update_mask(env);
    return RISCV_EXCP_NONE;
}
1361
/* Read the high 32 bits of mstatus (RV32 only). */
static RISCVException read_mstatush(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mstatus >> 32;
    return RISCV_EXCP_NONE;
}

/*
 * Write the high 32 bits of mstatus (RV32 only). Only MPV/GVA are
 * writable here, and only when the H extension is present.
 */
static RISCVException write_mstatush(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    uint64_t valh = (uint64_t)val << 32;
    uint64_t mask = riscv_has_ext(env, RVH) ? MSTATUS_MPV | MSTATUS_GVA : 0;

    env->mstatus = (env->mstatus & ~mask) | (valh & mask);

    return RISCV_EXCP_NONE;
}
1379
/* 128-bit mstatus read: low half is mstatus, high half carries SD. */
static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
                                        Int128 *val)
{
    *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128,
                                                      env->mstatus));
    return RISCV_EXCP_NONE;
}

/* 128-bit misa read: extensions in the low half, MXL in bits 127:126. */
static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
                                     Int128 *val)
{
    *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
    return RISCV_EXCP_NONE;
}
1394
/*
 * Read misa: MXL in the top two bits of the register, extension
 * letters in the low bits.
 */
static RISCVException read_misa(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    target_ulong misa;

    switch (env->misa_mxl) {
    case MXL_RV32:
        /* MXL occupies bits [31:30] on a 32-bit misa */
        misa = (target_ulong)MXL_RV32 << 30;
        break;
#ifdef TARGET_RISCV64
    case MXL_RV64:
        /* MXL occupies bits [63:62] on a 64-bit misa */
        misa = (target_ulong)MXL_RV64 << 62;
        break;
#endif
    default:
        g_assert_not_reached();
    }

    *val = misa | env->misa_ext;
    return RISCV_EXCP_NONE;
}
1416
/*
 * Write misa: WARL-legalize the requested extension set, validate it
 * against the CPU model, and roll back on any validation failure.
 * Writes are dropped entirely unless the "misa_w" CPU option is set.
 */
static RISCVException write_misa(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    RISCVCPU *cpu = env_archcpu(env);
    uint32_t orig_misa_ext = env->misa_ext;
    Error *local_err = NULL;

    if (!riscv_cpu_cfg(env)->misa_w) {
        /* drop write to misa */
        return RISCV_EXCP_NONE;
    }

    /* Mask extensions that are not supported by this hart */
    val &= env->misa_ext_mask;

    /*
     * Suppress 'C' if next instruction is not aligned
     * TODO: this should check next_pc
     */
    if ((val & RVC) && (GETPC() & ~3) != 0) {
        val &= ~RVC;
    }

    /* Disable RVG if any of its dependencies are disabled */
    if (!(val & RVI && val & RVM && val & RVA &&
          val & RVF && val & RVD)) {
        val &= ~RVG;
    }

    /* If nothing changed, do nothing. */
    if (val == env->misa_ext) {
        return RISCV_EXCP_NONE;
    }

    /* Tentatively apply, then let the validator accept or reject it */
    env->misa_ext = val;
    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        /* Rollback on validation error */
        qemu_log_mask(LOG_GUEST_ERROR, "Unable to write MISA ext value "
                      "0x%x, keeping existing MISA ext 0x%x\n",
                      env->misa_ext, orig_misa_ext);

        env->misa_ext = orig_misa_ext;

        return RISCV_EXCP_NONE;
    }

    /* With F gone, the FS state field no longer applies */
    if (!(env->misa_ext & RVF)) {
        env->mstatus &= ~MSTATUS_FS;
    }

    /* flush translation cache */
    tb_flush(env_cpu(env));
    env->xl = riscv_cpu_mxl(env);
    return RISCV_EXCP_NONE;
}
1473
/* Read medeleg, the machine exception delegation register. */
static RISCVException read_medeleg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->medeleg;
    return RISCV_EXCP_NONE;
}

/* Write medeleg; only causes in DELEGABLE_EXCPS are writable. */
static RISCVException write_medeleg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
    return RISCV_EXCP_NONE;
}
1487
rmw_mideleg64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)1488 static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno,
1489 uint64_t *ret_val,
1490 uint64_t new_val, uint64_t wr_mask)
1491 {
1492 uint64_t mask = wr_mask & delegable_ints;
1493
1494 if (ret_val) {
1495 *ret_val = env->mideleg;
1496 }
1497
1498 env->mideleg = (env->mideleg & ~mask) | (new_val & mask);
1499
1500 if (riscv_has_ext(env, RVH)) {
1501 env->mideleg |= HS_MODE_INTERRUPTS;
1502 }
1503
1504 return RISCV_EXCP_NONE;
1505 }
1506
rmw_mideleg(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)1507 static RISCVException rmw_mideleg(CPURISCVState *env, int csrno,
1508 target_ulong *ret_val,
1509 target_ulong new_val, target_ulong wr_mask)
1510 {
1511 uint64_t rval;
1512 RISCVException ret;
1513
1514 ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask);
1515 if (ret_val) {
1516 *ret_val = rval;
1517 }
1518
1519 return ret;
1520 }
1521
rmw_midelegh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)1522 static RISCVException rmw_midelegh(CPURISCVState *env, int csrno,
1523 target_ulong *ret_val,
1524 target_ulong new_val,
1525 target_ulong wr_mask)
1526 {
1527 uint64_t rval;
1528 RISCVException ret;
1529
1530 ret = rmw_mideleg64(env, csrno, &rval,
1531 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1532 if (ret_val) {
1533 *ret_val = rval >> 32;
1534 }
1535
1536 return ret;
1537 }
1538
rmw_mie64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)1539 static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
1540 uint64_t *ret_val,
1541 uint64_t new_val, uint64_t wr_mask)
1542 {
1543 uint64_t mask = wr_mask & all_ints;
1544
1545 if (ret_val) {
1546 *ret_val = env->mie;
1547 }
1548
1549 env->mie = (env->mie & ~mask) | (new_val & mask);
1550
1551 if (!riscv_has_ext(env, RVH)) {
1552 env->mie &= ~((uint64_t)HS_MODE_INTERRUPTS);
1553 }
1554
1555 return RISCV_EXCP_NONE;
1556 }
1557
rmw_mie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)1558 static RISCVException rmw_mie(CPURISCVState *env, int csrno,
1559 target_ulong *ret_val,
1560 target_ulong new_val, target_ulong wr_mask)
1561 {
1562 uint64_t rval;
1563 RISCVException ret;
1564
1565 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask);
1566 if (ret_val) {
1567 *ret_val = rval;
1568 }
1569
1570 return ret;
1571 }
1572
rmw_mieh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)1573 static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
1574 target_ulong *ret_val,
1575 target_ulong new_val, target_ulong wr_mask)
1576 {
1577 uint64_t rval;
1578 RISCVException ret;
1579
1580 ret = rmw_mie64(env, csrno, &rval,
1581 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1582 if (ret_val) {
1583 *ret_val = rval >> 32;
1584 }
1585
1586 return ret;
1587 }
1588
rmw_mvien64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)1589 static RISCVException rmw_mvien64(CPURISCVState *env, int csrno,
1590 uint64_t *ret_val,
1591 uint64_t new_val, uint64_t wr_mask)
1592 {
1593 uint64_t mask = wr_mask & mvien_writable_mask;
1594
1595 if (ret_val) {
1596 *ret_val = env->mvien;
1597 }
1598
1599 env->mvien = (env->mvien & ~mask) | (new_val & mask);
1600
1601 return RISCV_EXCP_NONE;
1602 }
1603
rmw_mvien(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)1604 static RISCVException rmw_mvien(CPURISCVState *env, int csrno,
1605 target_ulong *ret_val,
1606 target_ulong new_val, target_ulong wr_mask)
1607 {
1608 uint64_t rval;
1609 RISCVException ret;
1610
1611 ret = rmw_mvien64(env, csrno, &rval, new_val, wr_mask);
1612 if (ret_val) {
1613 *ret_val = rval;
1614 }
1615
1616 return ret;
1617 }
1618
rmw_mvienh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)1619 static RISCVException rmw_mvienh(CPURISCVState *env, int csrno,
1620 target_ulong *ret_val,
1621 target_ulong new_val, target_ulong wr_mask)
1622 {
1623 uint64_t rval;
1624 RISCVException ret;
1625
1626 ret = rmw_mvien64(env, csrno, &rval,
1627 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1628 if (ret_val) {
1629 *ret_val = rval >> 32;
1630 }
1631
1632 return ret;
1633 }
1634
/*
 * Read mtopi: the highest-priority pending-and-enabled M-level
 * interrupt, encoded as IID in the upper field and its priority in
 * the low bits; 0 when nothing is pending.
 */
static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val)
{
    int irq;
    uint8_t iprio;

    irq = riscv_cpu_mirq_pending(env);
    if (irq <= 0 || irq > 63) {
        /* No pending interrupt (or out-of-range IRQ number) */
        *val = 0;
    } else {
        iprio = env->miprio[irq];
        if (!iprio) {
            /*
             * Priority 0 means "use the default order"; report the
             * maximum (lowest) priority number when the default order
             * ranks this IRQ below IPRIO_DEFAULT_M.
             */
            if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) {
                iprio = IPRIO_MMAXIPRIO;
            }
        }
        *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
        *val |= iprio;
    }

    return RISCV_EXCP_NONE;
}
1656
/*
 * When executing in VS-mode, accesses to the S-mode AIA CSRs are
 * redirected to their VS-mode counterparts; everything else (and all
 * accesses with V=0) passes through unchanged.
 */
static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
{
    if (!env->virt_enabled) {
        return csrno;
    }

    if (csrno == CSR_SISELECT) {
        return CSR_VSISELECT;
    }
    if (csrno == CSR_SIREG) {
        return CSR_VSIREG;
    }
    if (csrno == CSR_STOPEI) {
        return CSR_VSTOPEI;
    }
    return csrno;
}
1674
/*
 * Read/modify/write the AIA indirect-select CSR for the current
 * privilege level (miselect/siselect/vsiselect).
 */
static int rmw_xiselect(CPURISCVState *env, int csrno, target_ulong *val,
                        target_ulong new_val, target_ulong wr_mask)
{
    target_ulong *iselect;

    /* Translate CSR number for VS-mode */
    csrno = aia_xlate_vs_csrno(env, csrno);

    /* Find the iselect CSR based on CSR number */
    switch (csrno) {
    case CSR_MISELECT:
        iselect = &env->miselect;
        break;
    case CSR_SISELECT:
        iselect = &env->siselect;
        break;
    case CSR_VSISELECT:
        iselect = &env->vsiselect;
        break;
    default:
        return RISCV_EXCP_ILLEGAL_INST;
    };

    if (val) {
        *val = *iselect;
    }

    /* Only the architected selector bits are writable */
    wr_mask &= ISELECT_MASK;
    if (wr_mask) {
        *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
    }

    return RISCV_EXCP_NONE;
}
1709
/*
 * Read/modify/write one interrupt-priority array register
 * (ISELECT_IPRIO0..ISELECT_IPRIO15) backed by the byte array "iprio".
 *
 * Each xlen-bit register packs one byte of priority per IRQ, so an
 * RV64 register covers 8 IRQs and an RV32 register 4; when xlen != 32
 * the odd selectors (which would address the RV32 high halves) are
 * invalid. The external IRQ "ext_irq_no" stays read-only zero.
 *
 * Returns 0 on success, -EINVAL for an invalid selector.
 */
static int rmw_iprio(target_ulong xlen,
                     target_ulong iselect, uint8_t *iprio,
                     target_ulong *val, target_ulong new_val,
                     target_ulong wr_mask, int ext_irq_no)
{
    int i, firq, nirqs;
    target_ulong old_val;

    if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) {
        return -EINVAL;
    }
    if (xlen != 32 && iselect & 0x1) {
        return -EINVAL;
    }

    /* IRQs per register, and the first IRQ this selector maps to */
    nirqs = 4 * (xlen / 32);
    firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs);

    /* Assemble the current register value from the priority bytes */
    old_val = 0;
    for (i = 0; i < nirqs; i++) {
        old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i);
    }

    if (val) {
        *val = old_val;
    }

    if (wr_mask) {
        new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
        for (i = 0; i < nirqs; i++) {
            /*
             * M-level and S-level external IRQ priority always read-only
             * zero. This means default priority order is always preferred
             * for M-level and S-level external IRQs.
             */
            if ((firq + i) == ext_irq_no) {
                continue;
            }
            iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff;
        }
    }

    return 0;
}
1754
/*
 * Read/modify/write the AIA indirect-register CSR
 * (mireg/sireg/vsireg): dispatch on the current xiselect value to
 * either the local iprio arrays or the machine's IMSIC emulation.
 *
 * On failure, raises a virtual-instruction fault for VS-mode accesses
 * to non-reserved selectors, otherwise an illegal-instruction fault.
 */
static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
                     target_ulong new_val, target_ulong wr_mask)
{
    bool virt, isel_reserved;
    uint8_t *iprio;
    int ret = -EINVAL;
    target_ulong priv, isel, vgein;

    /* Translate CSR number for VS-mode */
    csrno = aia_xlate_vs_csrno(env, csrno);

    /* Decode register details from CSR number */
    virt = false;
    isel_reserved = false;
    switch (csrno) {
    case CSR_MIREG:
        iprio = env->miprio;
        isel = env->miselect;
        priv = PRV_M;
        break;
    case CSR_SIREG:
        /*
         * S-mode may not touch the IMSIC interrupt-file selectors when
         * SEIP is under mvien control.
         */
        if (env->priv == PRV_S && env->mvien & MIP_SEIP &&
            env->siselect >= ISELECT_IMSIC_EIDELIVERY &&
            env->siselect <= ISELECT_IMSIC_EIE63) {
            goto done;
        }
        iprio = env->siprio;
        isel = env->siselect;
        priv = PRV_S;
        break;
    case CSR_VSIREG:
        iprio = env->hviprio;
        isel = env->vsiselect;
        priv = PRV_S;
        virt = true;
        break;
    default:
        goto done;
    };

    /* Find the selected guest interrupt file */
    vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;

    if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) {
        /* Local interrupt priority registers not available for VS-mode */
        if (!virt) {
            ret = rmw_iprio(riscv_cpu_mxl_bits(env),
                            isel, iprio, val, new_val, wr_mask,
                            (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT);
        }
    } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) {
        /* IMSIC registers only available when machine implements it. */
        if (env->aia_ireg_rmw_fn[priv]) {
            /* Selected guest interrupt file should not be zero */
            if (virt && (!vgein || env->geilen < vgein)) {
                goto done;
            }
            /* Call machine specific IMSIC register emulation */
            ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
                                             AIA_MAKE_IREG(isel, priv, virt, vgein,
                                                           riscv_cpu_mxl_bits(env)),
                                             val, new_val, wr_mask);
        }
    } else {
        isel_reserved = true;
    }

done:
    if (ret) {
        return (env->virt_enabled && virt && !isel_reserved) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }
    return RISCV_EXCP_NONE;
}
1829
/*
 * Read/modify/write the IMSIC top-external-interrupt CSR
 * (mtopei/stopei/vstopei) by delegating to the machine's IMSIC
 * register emulation. Faults when no IMSIC is implemented or the
 * selected guest interrupt file is invalid.
 */
static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val,
                      target_ulong new_val, target_ulong wr_mask)
{
    bool virt;
    int ret = -EINVAL;
    target_ulong priv, vgein;

    /* Translate CSR number for VS-mode */
    csrno = aia_xlate_vs_csrno(env, csrno);

    /* Decode register details from CSR number */
    virt = false;
    switch (csrno) {
    case CSR_MTOPEI:
        priv = PRV_M;
        break;
    case CSR_STOPEI:
        /* S-mode access is blocked while SEIP is under mvien control */
        if (env->mvien & MIP_SEIP && env->priv == PRV_S) {
            goto done;
        }
        priv = PRV_S;
        break;
    case CSR_VSTOPEI:
        priv = PRV_S;
        virt = true;
        break;
    default:
        goto done;
    };

    /* IMSIC CSRs only available when machine implements IMSIC. */
    if (!env->aia_ireg_rmw_fn[priv]) {
        goto done;
    }

    /* Find the selected guest interrupt file */
    vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;

    /* Selected guest interrupt file should be valid */
    if (virt && (!vgein || env->geilen < vgein)) {
        goto done;
    }

    /* Call machine specific IMSIC register emulation for TOPEI */
    ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
                                     AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein,
                                                   riscv_cpu_mxl_bits(env)),
                                     val, new_val, wr_mask);

done:
    if (ret) {
        return (env->virt_enabled && virt) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }
    return RISCV_EXCP_NONE;
}
1886
/* Read mtvec, the machine trap-vector base-address register. */
static RISCVException read_mtvec(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->mtvec;
    return RISCV_EXCP_NONE;
}
1893
write_mtvec(CPURISCVState * env,int csrno,target_ulong val)1894 static RISCVException write_mtvec(CPURISCVState *env, int csrno,
1895 target_ulong val)
1896 {
1897 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
1898 if ((val & 3) < 2) {
1899 env->mtvec = val;
1900 } else {
1901 qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n");
1902 }
1903 return RISCV_EXCP_NONE;
1904 }
1905
/* Read mcountinhibit, the counter-inhibit register. */
static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
                                         target_ulong *val)
{
    *val = env->mcountinhibit;
    return RISCV_EXCP_NONE;
}
1912
/*
 * Write mcountinhibit, masking off bits for counters this CPU does
 * not implement, then mark every non-inhibited counter as started.
 */
static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
                                          target_ulong val)
{
    int cidx;
    PMUCTRState *counter;
    RISCVCPU *cpu = env_archcpu(env);

    /* WARL register - disable unavailable counters; TM bit is always 0 */
    env->mcountinhibit =
        val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR);

    /* Check if any other counter is also monitoring cycles/instructions */
    for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
        if (!get_field(env->mcountinhibit, BIT(cidx))) {
            counter = &env->pmu_ctrs[cidx];
            /* counter is (re-)enabled: flag it as running */
            counter->started = true;
        }
    }

    return RISCV_EXCP_NONE;
}
1934
/* Read mcounteren, the machine counter-enable register. */
static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->mcounteren;
    return RISCV_EXCP_NONE;
}

/* Write mcounteren, masking off counters this CPU does not implement. */
static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    RISCVCPU *cpu = env_archcpu(env);

    /* WARL register - disable unavailable counters */
    env->mcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
                             COUNTEREN_IR);
    return RISCV_EXCP_NONE;
}
1952
1953 /* Machine Trap Handling */
/* 128-bit mscratch read: mscratch low, mscratchh high. */
static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
                                         Int128 *val)
{
    *val = int128_make128(env->mscratch, env->mscratchh);
    return RISCV_EXCP_NONE;
}

/* 128-bit mscratch write: split into mscratch/mscratchh. */
static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
                                          Int128 val)
{
    env->mscratch = int128_getlo(val);
    env->mscratchh = int128_gethi(val);
    return RISCV_EXCP_NONE;
}

/* Read mscratch, the machine scratch register. */
static RISCVException read_mscratch(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mscratch;
    return RISCV_EXCP_NONE;
}

/* Write mscratch; any value is accepted. */
static RISCVException write_mscratch(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    env->mscratch = val;
    return RISCV_EXCP_NONE;
}
1982
/* Read mepc, the machine exception program counter. */
static RISCVException read_mepc(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->mepc;
    return RISCV_EXCP_NONE;
}

/* Write mepc; any value is accepted. */
static RISCVException write_mepc(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    env->mepc = val;
    return RISCV_EXCP_NONE;
}

/* Read mcause, the machine trap cause register. */
static RISCVException read_mcause(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mcause;
    return RISCV_EXCP_NONE;
}

/* Write mcause; any value is accepted. */
static RISCVException write_mcause(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->mcause = val;
    return RISCV_EXCP_NONE;
}

/* Read mtval, the machine trap value register. */
static RISCVException read_mtval(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->mtval;
    return RISCV_EXCP_NONE;
}

/* Write mtval; any value is accepted. */
static RISCVException write_mtval(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->mtval = val;
    return RISCV_EXCP_NONE;
}
2024
2025 /* Execution environment configuration setup */
/* Execution environment configuration setup */

/* Read menvcfg (full register on RV64, low half on RV32). */
static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->menvcfg;
    return RISCV_EXCP_NONE;
}

/*
 * Write menvcfg. The always-writable bits are FIOM and the Zicbo*
 * cache-operation enables; on RV64 the extension-gated PBMTE/STCE/ADUE
 * bits (which live in the upper half, written via menvcfgh on RV32)
 * are writable too.
 */
static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
    uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE;

    if (riscv_cpu_mxl(env) == MXL_RV64) {
        mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
                (cfg->ext_sstc ? MENVCFG_STCE : 0) |
                (cfg->ext_svadu ? MENVCFG_ADUE : 0);
    }
    env->menvcfg = (env->menvcfg & ~mask) | (val & mask);

    return RISCV_EXCP_NONE;
}
2048
/* Read the high 32 bits of menvcfg (RV32 only). */
static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->menvcfg >> 32;
    return RISCV_EXCP_NONE;
}

/*
 * Write the high 32 bits of menvcfg (RV32 only); only the
 * extension-gated PBMTE/STCE/ADUE bits are writable.
 */
static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
    uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
                    (cfg->ext_sstc ? MENVCFG_STCE : 0) |
                    (cfg->ext_svadu ? MENVCFG_ADUE : 0);
    uint64_t valh = (uint64_t)val << 32;

    env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);

    return RISCV_EXCP_NONE;
}
2069
/* Read senvcfg; access is gated by the Smstateen HSENVCFG bit. */
static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    RISCVException ret;

    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    *val = env->senvcfg;
    return RISCV_EXCP_NONE;
}

/*
 * Write senvcfg; only FIOM and the Zicbo* cache-operation enables are
 * writable, and access is gated by the Smstateen HSENVCFG bit.
 */
static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
    RISCVException ret;

    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    env->senvcfg = (env->senvcfg & ~mask) | (val & mask);
    return RISCV_EXCP_NONE;
}
2098
/*
 * Read henvcfg; access is gated by the Smstateen HSENVCFG bit, and
 * the menvcfg-gated bits read as zero when disabled at M-level.
 */
static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    RISCVException ret;

    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /*
     * henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0
     * henvcfg.stce is read_only 0 when menvcfg.stce = 0
     * henvcfg.adue is read_only 0 when menvcfg.adue = 0
     */
    *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
                           env->menvcfg);
    return RISCV_EXCP_NONE;
}

/*
 * Write henvcfg (full register on RV64, low half on RV32). FIOM and
 * the Zicbo* enables are always writable; PBMTE/STCE/ADUE become
 * writable only when the corresponding menvcfg bit is set.
 */
static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;
    RISCVException ret;

    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (riscv_cpu_mxl(env) == MXL_RV64) {
        mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE);
    }

    env->henvcfg = (env->henvcfg & ~mask) | (val & mask);

    return RISCV_EXCP_NONE;
}
2138
read_henvcfgh(CPURISCVState * env,int csrno,target_ulong * val)2139 static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
2140 target_ulong *val)
2141 {
2142 RISCVException ret;
2143
2144 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2145 if (ret != RISCV_EXCP_NONE) {
2146 return ret;
2147 }
2148
2149 *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
2150 env->menvcfg)) >> 32;
2151 return RISCV_EXCP_NONE;
2152 }
2153
write_henvcfgh(CPURISCVState * env,int csrno,target_ulong val)2154 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
2155 target_ulong val)
2156 {
2157 uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE |
2158 HENVCFG_ADUE);
2159 uint64_t valh = (uint64_t)val << 32;
2160 RISCVException ret;
2161
2162 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2163 if (ret != RISCV_EXCP_NONE) {
2164 return ret;
2165 }
2166
2167 env->henvcfg = (env->henvcfg & ~mask) | (valh & mask);
2168 return RISCV_EXCP_NONE;
2169 }
2170
read_mstateen(CPURISCVState * env,int csrno,target_ulong * val)2171 static RISCVException read_mstateen(CPURISCVState *env, int csrno,
2172 target_ulong *val)
2173 {
2174 *val = env->mstateen[csrno - CSR_MSTATEEN0];
2175
2176 return RISCV_EXCP_NONE;
2177 }
2178
write_mstateen(CPURISCVState * env,int csrno,uint64_t wr_mask,target_ulong new_val)2179 static RISCVException write_mstateen(CPURISCVState *env, int csrno,
2180 uint64_t wr_mask, target_ulong new_val)
2181 {
2182 uint64_t *reg;
2183
2184 reg = &env->mstateen[csrno - CSR_MSTATEEN0];
2185 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2186
2187 return RISCV_EXCP_NONE;
2188 }
2189
write_mstateen0(CPURISCVState * env,int csrno,target_ulong new_val)2190 static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
2191 target_ulong new_val)
2192 {
2193 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2194 if (!riscv_has_ext(env, RVF)) {
2195 wr_mask |= SMSTATEEN0_FCSR;
2196 }
2197
2198 return write_mstateen(env, csrno, wr_mask, new_val);
2199 }
2200
write_mstateen_1_3(CPURISCVState * env,int csrno,target_ulong new_val)2201 static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno,
2202 target_ulong new_val)
2203 {
2204 return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2205 }
2206
read_mstateenh(CPURISCVState * env,int csrno,target_ulong * val)2207 static RISCVException read_mstateenh(CPURISCVState *env, int csrno,
2208 target_ulong *val)
2209 {
2210 *val = env->mstateen[csrno - CSR_MSTATEEN0H] >> 32;
2211
2212 return RISCV_EXCP_NONE;
2213 }
2214
write_mstateenh(CPURISCVState * env,int csrno,uint64_t wr_mask,target_ulong new_val)2215 static RISCVException write_mstateenh(CPURISCVState *env, int csrno,
2216 uint64_t wr_mask, target_ulong new_val)
2217 {
2218 uint64_t *reg, val;
2219
2220 reg = &env->mstateen[csrno - CSR_MSTATEEN0H];
2221 val = (uint64_t)new_val << 32;
2222 val |= *reg & 0xFFFFFFFF;
2223 *reg = (*reg & ~wr_mask) | (val & wr_mask);
2224
2225 return RISCV_EXCP_NONE;
2226 }
2227
write_mstateen0h(CPURISCVState * env,int csrno,target_ulong new_val)2228 static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
2229 target_ulong new_val)
2230 {
2231 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2232
2233 return write_mstateenh(env, csrno, wr_mask, new_val);
2234 }
2235
write_mstateenh_1_3(CPURISCVState * env,int csrno,target_ulong new_val)2236 static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno,
2237 target_ulong new_val)
2238 {
2239 return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
2240 }
2241
read_hstateen(CPURISCVState * env,int csrno,target_ulong * val)2242 static RISCVException read_hstateen(CPURISCVState *env, int csrno,
2243 target_ulong *val)
2244 {
2245 int index = csrno - CSR_HSTATEEN0;
2246
2247 *val = env->hstateen[index] & env->mstateen[index];
2248
2249 return RISCV_EXCP_NONE;
2250 }
2251
write_hstateen(CPURISCVState * env,int csrno,uint64_t mask,target_ulong new_val)2252 static RISCVException write_hstateen(CPURISCVState *env, int csrno,
2253 uint64_t mask, target_ulong new_val)
2254 {
2255 int index = csrno - CSR_HSTATEEN0;
2256 uint64_t *reg, wr_mask;
2257
2258 reg = &env->hstateen[index];
2259 wr_mask = env->mstateen[index] & mask;
2260 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2261
2262 return RISCV_EXCP_NONE;
2263 }
2264
write_hstateen0(CPURISCVState * env,int csrno,target_ulong new_val)2265 static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
2266 target_ulong new_val)
2267 {
2268 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2269
2270 if (!riscv_has_ext(env, RVF)) {
2271 wr_mask |= SMSTATEEN0_FCSR;
2272 }
2273
2274 return write_hstateen(env, csrno, wr_mask, new_val);
2275 }
2276
write_hstateen_1_3(CPURISCVState * env,int csrno,target_ulong new_val)2277 static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno,
2278 target_ulong new_val)
2279 {
2280 return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2281 }
2282
read_hstateenh(CPURISCVState * env,int csrno,target_ulong * val)2283 static RISCVException read_hstateenh(CPURISCVState *env, int csrno,
2284 target_ulong *val)
2285 {
2286 int index = csrno - CSR_HSTATEEN0H;
2287
2288 *val = (env->hstateen[index] >> 32) & (env->mstateen[index] >> 32);
2289
2290 return RISCV_EXCP_NONE;
2291 }
2292
write_hstateenh(CPURISCVState * env,int csrno,uint64_t mask,target_ulong new_val)2293 static RISCVException write_hstateenh(CPURISCVState *env, int csrno,
2294 uint64_t mask, target_ulong new_val)
2295 {
2296 int index = csrno - CSR_HSTATEEN0H;
2297 uint64_t *reg, wr_mask, val;
2298
2299 reg = &env->hstateen[index];
2300 val = (uint64_t)new_val << 32;
2301 val |= *reg & 0xFFFFFFFF;
2302 wr_mask = env->mstateen[index] & mask;
2303 *reg = (*reg & ~wr_mask) | (val & wr_mask);
2304
2305 return RISCV_EXCP_NONE;
2306 }
2307
write_hstateen0h(CPURISCVState * env,int csrno,target_ulong new_val)2308 static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
2309 target_ulong new_val)
2310 {
2311 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2312
2313 return write_hstateenh(env, csrno, wr_mask, new_val);
2314 }
2315
write_hstateenh_1_3(CPURISCVState * env,int csrno,target_ulong new_val)2316 static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno,
2317 target_ulong new_val)
2318 {
2319 return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
2320 }
2321
read_sstateen(CPURISCVState * env,int csrno,target_ulong * val)2322 static RISCVException read_sstateen(CPURISCVState *env, int csrno,
2323 target_ulong *val)
2324 {
2325 bool virt = env->virt_enabled;
2326 int index = csrno - CSR_SSTATEEN0;
2327
2328 *val = env->sstateen[index] & env->mstateen[index];
2329 if (virt) {
2330 *val &= env->hstateen[index];
2331 }
2332
2333 return RISCV_EXCP_NONE;
2334 }
2335
write_sstateen(CPURISCVState * env,int csrno,uint64_t mask,target_ulong new_val)2336 static RISCVException write_sstateen(CPURISCVState *env, int csrno,
2337 uint64_t mask, target_ulong new_val)
2338 {
2339 bool virt = env->virt_enabled;
2340 int index = csrno - CSR_SSTATEEN0;
2341 uint64_t wr_mask;
2342 uint64_t *reg;
2343
2344 wr_mask = env->mstateen[index] & mask;
2345 if (virt) {
2346 wr_mask &= env->hstateen[index];
2347 }
2348
2349 reg = &env->sstateen[index];
2350 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2351
2352 return RISCV_EXCP_NONE;
2353 }
2354
write_sstateen0(CPURISCVState * env,int csrno,target_ulong new_val)2355 static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
2356 target_ulong new_val)
2357 {
2358 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2359
2360 if (!riscv_has_ext(env, RVF)) {
2361 wr_mask |= SMSTATEEN0_FCSR;
2362 }
2363
2364 return write_sstateen(env, csrno, wr_mask, new_val);
2365 }
2366
write_sstateen_1_3(CPURISCVState * env,int csrno,target_ulong new_val)2367 static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno,
2368 target_ulong new_val)
2369 {
2370 return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2371 }
2372
/*
 * Read-modify-write the full 64-bit view of mip.  Only interrupts in
 * delegable_ints are writable; the returned old value additionally
 * reflects guest external/timer interrupt state kept outside env->mip.
 */
static RISCVException rmw_mip64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    uint64_t old_mip, mask = wr_mask & delegable_ints;
    uint32_t gin;

    if (mask & MIP_SEIP) {
        /*
         * Remember what software wrote to SEIP; the effective bit is the
         * OR of that and the externally driven SEIP level.
         */
        env->software_seip = new_val & MIP_SEIP;
        new_val |= env->external_seip * MIP_SEIP;
    }

    if (riscv_cpu_cfg(env)->ext_sstc && (env->priv == PRV_M) &&
        get_field(env->menvcfg, MENVCFG_STCE)) {
        /* sstc extension forbids STIP & VSTIP to be writeable in mip */
        mask = mask & ~(MIP_STIP | MIP_VSTIP);
    }

    if (mask) {
        old_mip = riscv_cpu_update_mip(env, mask, (new_val & mask));
    } else {
        old_mip = env->mip;
    }

    if (csrno != CSR_HVIP) {
        /*
         * Fold in VSEIP from the guest external interrupt line selected
         * by hstatus.VGEIN, and VSTIP from the virtual timer.  hvip
         * deliberately excludes these injected sources.
         */
        gin = get_field(env->hstatus, HSTATUS_VGEIN);
        old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0;
        old_mip |= env->vstime_irq ? MIP_VSTIP : 0;
    }

    if (ret_val) {
        *ret_val = old_mip;
    }

    return RISCV_EXCP_NONE;
}
2409
rmw_mip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2410 static RISCVException rmw_mip(CPURISCVState *env, int csrno,
2411 target_ulong *ret_val,
2412 target_ulong new_val, target_ulong wr_mask)
2413 {
2414 uint64_t rval;
2415 RISCVException ret;
2416
2417 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask);
2418 if (ret_val) {
2419 *ret_val = rval;
2420 }
2421
2422 return ret;
2423 }
2424
rmw_miph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2425 static RISCVException rmw_miph(CPURISCVState *env, int csrno,
2426 target_ulong *ret_val,
2427 target_ulong new_val, target_ulong wr_mask)
2428 {
2429 uint64_t rval;
2430 RISCVException ret;
2431
2432 ret = rmw_mip64(env, csrno, &rval,
2433 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2434 if (ret_val) {
2435 *ret_val = rval >> 32;
2436 }
2437
2438 return ret;
2439 }
2440
2441 /*
2442 * The function is written for two use-cases:
2443 * 1- To access mvip csr as is for m-mode access.
2444 * 2- To access sip as a combination of mip and mvip for s-mode.
2445 *
2446 * Both report bits 1, 5, 9 and 13:63 but with the exception of
2447 * STIP being read-only zero in case of mvip when sstc extension
2448 * is present.
2449 * Also, sip needs to be read-only zero when both mideleg[i] and
2450 * mvien[i] are zero but mvip needs to be an alias of mip.
2451 */
static RISCVException rmw_mvip64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    RISCVCPU *cpu = env_archcpu(env);
    target_ulong ret_mip = 0;
    RISCVException ret;
    uint64_t old_mvip;

    /*
     * mideleg[i]  mvien[i]
     *   0           0      No delegation. mvip[i] is alias of mip[i].
     *   0           1      mvip[i] becomes source of interrupt, mip bypassed.
     *   1           X      mip[i] is source of interrupt and mvip[i] aliases
     *                      mip[i].
     *
     *   So alias condition would be for bits:
     *      ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (mideleg | ~mvien)) |
     *          (!sstc & MIP_STIP)
     *
     *   Non-alias condition will be for bits:
     *      (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (~mideleg & mvien)
     *
     *  alias_mask denotes the bits that come from mip nalias_mask denotes bits
     *  that come from hvip.
     */
    uint64_t alias_mask = ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
        (env->mideleg | ~env->mvien)) | MIP_STIP;
    uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
        (~env->mideleg & env->mvien);
    uint64_t wr_mask_mvip;
    uint64_t wr_mask_mip;

    /*
     * mideleg[i]  mvien[i]
     *   0           0      sip[i] read-only zero.
     *   0           1      sip[i] alias of mvip[i].
     *   1           X      sip[i] alias of mip[i].
     *
     * Both alias and non-alias mask remain same for sip except for bits
     * which are zero in both mideleg and mvien.
     */
    if (csrno == CSR_SIP) {
        /* Remove bits that are zero in both mideleg and mvien. */
        alias_mask &= (env->mideleg | env->mvien);
        nalias_mask &= (env->mideleg | env->mvien);
    }

    /*
     * If sstc is present, mvip.STIP is not an alias of mip.STIP so clear
     * that bit in our mip returned value.
     */
    if (cpu->cfg.ext_sstc && (env->priv == PRV_M) &&
        get_field(env->menvcfg, MENVCFG_STCE)) {
        alias_mask &= ~MIP_STIP;
    }

    wr_mask_mip = wr_mask & alias_mask & mvip_writable_mask;
    wr_mask_mvip = wr_mask & nalias_mask & mvip_writable_mask;

    /*
     * For bits set in alias_mask, mvip needs to be alias of mip, so forward
     * this to rmw_mip.
     */
    ret = rmw_mip(env, CSR_MIP, &ret_mip, new_val, wr_mask_mip);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    old_mvip = env->mvip;

    /*
     * Write to mvip. Update only non-alias bits. Alias bits were updated
     * in mip in rmw_mip above.
     */
    if (wr_mask_mvip) {
        env->mvip = (env->mvip & ~wr_mask_mvip) | (new_val & wr_mask_mvip);

        /*
         * Given mvip is separate source from mip, we need to trigger interrupt
         * from here separately. Normally this happen from riscv_cpu_update_mip.
         */
        riscv_cpu_interrupt(env);
    }

    if (ret_val) {
        /* Combine the aliased mip bits with the mvip-sourced bits. */
        ret_mip &= alias_mask;
        old_mvip &= nalias_mask;

        *ret_val = old_mvip | ret_mip;
    }

    return RISCV_EXCP_NONE;
}
2546
rmw_mvip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2547 static RISCVException rmw_mvip(CPURISCVState *env, int csrno,
2548 target_ulong *ret_val,
2549 target_ulong new_val, target_ulong wr_mask)
2550 {
2551 uint64_t rval;
2552 RISCVException ret;
2553
2554 ret = rmw_mvip64(env, csrno, &rval, new_val, wr_mask);
2555 if (ret_val) {
2556 *ret_val = rval;
2557 }
2558
2559 return ret;
2560 }
2561
rmw_mviph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2562 static RISCVException rmw_mviph(CPURISCVState *env, int csrno,
2563 target_ulong *ret_val,
2564 target_ulong new_val, target_ulong wr_mask)
2565 {
2566 uint64_t rval;
2567 RISCVException ret;
2568
2569 ret = rmw_mvip64(env, csrno, &rval,
2570 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2571 if (ret_val) {
2572 *ret_val = rval >> 32;
2573 }
2574
2575 return ret;
2576 }
2577
2578 /* Supervisor Trap Setup */
read_sstatus_i128(CPURISCVState * env,int csrno,Int128 * val)2579 static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
2580 Int128 *val)
2581 {
2582 uint64_t mask = sstatus_v1_10_mask;
2583 uint64_t sstatus = env->mstatus & mask;
2584 if (env->xl != MXL_RV32 || env->debugger) {
2585 mask |= SSTATUS64_UXL;
2586 }
2587
2588 *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
2589 return RISCV_EXCP_NONE;
2590 }
2591
/*
 * Read sstatus: a masked view of mstatus, with the SD summary bit
 * recomputed for the current width.
 */
static RISCVException read_sstatus(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    target_ulong mask = (sstatus_v1_10_mask);
    /* UXL is visible on RV64+ or to the debugger. */
    if (env->xl != MXL_RV32 || env->debugger) {
        mask |= SSTATUS64_UXL;
    }
    /* TODO: Use SXL not MXL. */
    *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
    return RISCV_EXCP_NONE;
}
2603
/*
 * Write sstatus by merging the writable fields into mstatus and
 * delegating to write_mstatus for the side effects.
 */
static RISCVException write_sstatus(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    target_ulong mask = (sstatus_v1_10_mask);

    if (env->xl != MXL_RV32 || env->debugger) {
        /* UXL becomes writable only when a non-zero value is supplied. */
        if ((val & SSTATUS64_UXL) != 0) {
            mask |= SSTATUS64_UXL;
        }
    }
    target_ulong newval = (env->mstatus & ~mask) | (val & mask);
    return write_mstatus(env, CSR_MSTATUS, newval);
}
2617
/*
 * Read-modify-write the 64-bit view of vsie.  Bits delegated via hideleg
 * alias mie (shifted down by one position in the VS view); bits enabled
 * only through hvien live in the separate env->vsie storage.
 */
static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    uint64_t alias_mask = (LOCAL_INTERRUPTS | VS_MODE_INTERRUPTS) &
                            env->hideleg;
    uint64_t nalias_mask = LOCAL_INTERRUPTS & (~env->hideleg & env->hvien);
    uint64_t rval, rval_vs, vsbits;
    uint64_t wr_mask_vsie;
    uint64_t wr_mask_mie;
    RISCVException ret;

    /* Bring VS-level bits to correct position */
    vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
    new_val &= ~(VS_MODE_INTERRUPTS >> 1);
    new_val |= vsbits << 1;

    vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
    wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
    wr_mask |= vsbits << 1;

    wr_mask_mie = wr_mask & alias_mask;
    wr_mask_vsie = wr_mask & nalias_mask;

    /* Aliased bits are forwarded to mie. */
    ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask_mie);

    /* Non-aliased bits are kept in the dedicated vsie storage. */
    rval_vs = env->vsie & nalias_mask;
    env->vsie = (env->vsie & ~wr_mask_vsie) | (new_val & wr_mask_vsie);

    if (ret_val) {
        /* Shift VS-level bits back down into their vsie positions. */
        rval &= alias_mask;
        vsbits = rval & VS_MODE_INTERRUPTS;
        rval &= ~VS_MODE_INTERRUPTS;
        *ret_val = rval | (vsbits >> 1) | rval_vs;
    }

    return ret;
}
2656
rmw_vsie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2657 static RISCVException rmw_vsie(CPURISCVState *env, int csrno,
2658 target_ulong *ret_val,
2659 target_ulong new_val, target_ulong wr_mask)
2660 {
2661 uint64_t rval;
2662 RISCVException ret;
2663
2664 ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask);
2665 if (ret_val) {
2666 *ret_val = rval;
2667 }
2668
2669 return ret;
2670 }
2671
rmw_vsieh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2672 static RISCVException rmw_vsieh(CPURISCVState *env, int csrno,
2673 target_ulong *ret_val,
2674 target_ulong new_val, target_ulong wr_mask)
2675 {
2676 uint64_t rval;
2677 RISCVException ret;
2678
2679 ret = rmw_vsie64(env, csrno, &rval,
2680 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2681 if (ret_val) {
2682 *ret_val = rval >> 32;
2683 }
2684
2685 return ret;
2686 }
2687
/*
 * Read-modify-write the 64-bit view of sie.  Delegated bits alias mie;
 * bits enabled only through mvien live in the separate env->sie storage.
 * In VS mode the access is redirected to vsie (or faults under VTI).
 */
static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
        (~env->mideleg & env->mvien);
    uint64_t alias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & env->mideleg;
    uint64_t sie_mask = wr_mask & nalias_mask;
    RISCVException ret;

    /*
     * mideleg[i]  mvien[i]
     *   0           0      sie[i] read-only zero.
     *   0           1      sie[i] is a separate writable bit.
     *   1           X      sie[i] alias of mie[i].
     *
     * Both alias and non-alias mask remain same for sip except for bits
     * which are zero in both mideleg and mvien.
     */
    if (env->virt_enabled) {
        if (env->hvictl & HVICTL_VTI) {
            /* hvictl.VTI forbids VS-mode sie accesses. */
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
        if (ret_val) {
            *ret_val &= alias_mask;
        }
    } else {
        /* Aliased bits go to mie; mvien-only bits stay in env->sie. */
        ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & alias_mask);
        if (ret_val) {
            *ret_val &= alias_mask;
            *ret_val |= env->sie & nalias_mask;
        }

        env->sie = (env->sie & ~sie_mask) | (new_val & sie_mask);
    }

    return ret;
}
2727
rmw_sie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2728 static RISCVException rmw_sie(CPURISCVState *env, int csrno,
2729 target_ulong *ret_val,
2730 target_ulong new_val, target_ulong wr_mask)
2731 {
2732 uint64_t rval;
2733 RISCVException ret;
2734
2735 ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask);
2736 if (ret == RISCV_EXCP_NONE && ret_val) {
2737 *ret_val = rval;
2738 }
2739
2740 return ret;
2741 }
2742
rmw_sieh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2743 static RISCVException rmw_sieh(CPURISCVState *env, int csrno,
2744 target_ulong *ret_val,
2745 target_ulong new_val, target_ulong wr_mask)
2746 {
2747 uint64_t rval;
2748 RISCVException ret;
2749
2750 ret = rmw_sie64(env, csrno, &rval,
2751 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2752 if (ret_val) {
2753 *ret_val = rval >> 32;
2754 }
2755
2756 return ret;
2757 }
2758
read_stvec(CPURISCVState * env,int csrno,target_ulong * val)2759 static RISCVException read_stvec(CPURISCVState *env, int csrno,
2760 target_ulong *val)
2761 {
2762 *val = env->stvec;
2763 return RISCV_EXCP_NONE;
2764 }
2765
write_stvec(CPURISCVState * env,int csrno,target_ulong val)2766 static RISCVException write_stvec(CPURISCVState *env, int csrno,
2767 target_ulong val)
2768 {
2769 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
2770 if ((val & 3) < 2) {
2771 env->stvec = val;
2772 } else {
2773 qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n");
2774 }
2775 return RISCV_EXCP_NONE;
2776 }
2777
read_scounteren(CPURISCVState * env,int csrno,target_ulong * val)2778 static RISCVException read_scounteren(CPURISCVState *env, int csrno,
2779 target_ulong *val)
2780 {
2781 *val = env->scounteren;
2782 return RISCV_EXCP_NONE;
2783 }
2784
write_scounteren(CPURISCVState * env,int csrno,target_ulong val)2785 static RISCVException write_scounteren(CPURISCVState *env, int csrno,
2786 target_ulong val)
2787 {
2788 env->scounteren = val;
2789 return RISCV_EXCP_NONE;
2790 }
2791
2792 /* Supervisor Trap Handling */
read_sscratch_i128(CPURISCVState * env,int csrno,Int128 * val)2793 static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
2794 Int128 *val)
2795 {
2796 *val = int128_make128(env->sscratch, env->sscratchh);
2797 return RISCV_EXCP_NONE;
2798 }
2799
write_sscratch_i128(CPURISCVState * env,int csrno,Int128 val)2800 static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
2801 Int128 val)
2802 {
2803 env->sscratch = int128_getlo(val);
2804 env->sscratchh = int128_gethi(val);
2805 return RISCV_EXCP_NONE;
2806 }
2807
read_sscratch(CPURISCVState * env,int csrno,target_ulong * val)2808 static RISCVException read_sscratch(CPURISCVState *env, int csrno,
2809 target_ulong *val)
2810 {
2811 *val = env->sscratch;
2812 return RISCV_EXCP_NONE;
2813 }
2814
write_sscratch(CPURISCVState * env,int csrno,target_ulong val)2815 static RISCVException write_sscratch(CPURISCVState *env, int csrno,
2816 target_ulong val)
2817 {
2818 env->sscratch = val;
2819 return RISCV_EXCP_NONE;
2820 }
2821
read_sepc(CPURISCVState * env,int csrno,target_ulong * val)2822 static RISCVException read_sepc(CPURISCVState *env, int csrno,
2823 target_ulong *val)
2824 {
2825 *val = env->sepc;
2826 return RISCV_EXCP_NONE;
2827 }
2828
write_sepc(CPURISCVState * env,int csrno,target_ulong val)2829 static RISCVException write_sepc(CPURISCVState *env, int csrno,
2830 target_ulong val)
2831 {
2832 env->sepc = val;
2833 return RISCV_EXCP_NONE;
2834 }
2835
read_scause(CPURISCVState * env,int csrno,target_ulong * val)2836 static RISCVException read_scause(CPURISCVState *env, int csrno,
2837 target_ulong *val)
2838 {
2839 *val = env->scause;
2840 return RISCV_EXCP_NONE;
2841 }
2842
write_scause(CPURISCVState * env,int csrno,target_ulong val)2843 static RISCVException write_scause(CPURISCVState *env, int csrno,
2844 target_ulong val)
2845 {
2846 env->scause = val;
2847 return RISCV_EXCP_NONE;
2848 }
2849
read_stval(CPURISCVState * env,int csrno,target_ulong * val)2850 static RISCVException read_stval(CPURISCVState *env, int csrno,
2851 target_ulong *val)
2852 {
2853 *val = env->stval;
2854 return RISCV_EXCP_NONE;
2855 }
2856
write_stval(CPURISCVState * env,int csrno,target_ulong val)2857 static RISCVException write_stval(CPURISCVState *env, int csrno,
2858 target_ulong val)
2859 {
2860 env->stval = val;
2861 return RISCV_EXCP_NONE;
2862 }
2863
2864 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
2865 uint64_t *ret_val,
2866 uint64_t new_val, uint64_t wr_mask);
2867
/*
 * Read-modify-write the 64-bit view of vsip by forwarding to hvip.
 * VS-level interrupt bits are shifted up by one position for the
 * underlying storage and shifted back down for the returned value.
 */
static RISCVException rmw_vsip64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t rval, mask = env->hideleg & VS_MODE_INTERRUPTS;
    uint64_t vsbits;

    /* Add virtualized bits into vsip mask. */
    mask |= env->hvien & ~env->hideleg;

    /* Bring VS-level bits to correct position */
    vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
    new_val &= ~(VS_MODE_INTERRUPTS >> 1);
    new_val |= vsbits << 1;
    vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
    wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
    wr_mask |= vsbits << 1;

    ret = rmw_hvip64(env, csrno, &rval, new_val,
                     wr_mask & mask & vsip_writable_mask);
    if (ret_val) {
        /* Shift VS-level bits back down into their vsip positions. */
        rval &= mask;
        vsbits = rval & VS_MODE_INTERRUPTS;
        rval &= ~VS_MODE_INTERRUPTS;
        *ret_val = rval | (vsbits >> 1);
    }

    return ret;
}
2898
rmw_vsip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2899 static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
2900 target_ulong *ret_val,
2901 target_ulong new_val, target_ulong wr_mask)
2902 {
2903 uint64_t rval;
2904 RISCVException ret;
2905
2906 ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask);
2907 if (ret_val) {
2908 *ret_val = rval;
2909 }
2910
2911 return ret;
2912 }
2913
rmw_vsiph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2914 static RISCVException rmw_vsiph(CPURISCVState *env, int csrno,
2915 target_ulong *ret_val,
2916 target_ulong new_val, target_ulong wr_mask)
2917 {
2918 uint64_t rval;
2919 RISCVException ret;
2920
2921 ret = rmw_vsip64(env, csrno, &rval,
2922 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2923 if (ret_val) {
2924 *ret_val = rval >> 32;
2925 }
2926
2927 return ret;
2928 }
2929
/*
 * Read-modify-write the 64-bit view of sip.  In VS mode the access is
 * redirected to vsip (or faults under hvictl.VTI); otherwise it is a
 * restricted view of mip/mvip handled by rmw_mvip64.
 */
static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t mask = (env->mideleg | env->mvien) & sip_writable_mask;

    if (env->virt_enabled) {
        if (env->hvictl & HVICTL_VTI) {
            /* hvictl.VTI forbids VS-mode sip accesses. */
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
    } else {
        ret = rmw_mvip64(env, csrno, ret_val, new_val, wr_mask & mask);
    }

    if (ret_val) {
        /* Bits neither delegated nor virtualized read as zero in sip. */
        *ret_val &= (env->mideleg | env->mvien) &
            (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS);
    }

    return ret;
}
2953
rmw_sip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2954 static RISCVException rmw_sip(CPURISCVState *env, int csrno,
2955 target_ulong *ret_val,
2956 target_ulong new_val, target_ulong wr_mask)
2957 {
2958 uint64_t rval;
2959 RISCVException ret;
2960
2961 ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask);
2962 if (ret_val) {
2963 *ret_val = rval;
2964 }
2965
2966 return ret;
2967 }
2968
rmw_siph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2969 static RISCVException rmw_siph(CPURISCVState *env, int csrno,
2970 target_ulong *ret_val,
2971 target_ulong new_val, target_ulong wr_mask)
2972 {
2973 uint64_t rval;
2974 RISCVException ret;
2975
2976 ret = rmw_sip64(env, csrno, &rval,
2977 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2978 if (ret_val) {
2979 *ret_val = rval >> 32;
2980 }
2981
2982 return ret;
2983 }
2984
2985 /* Supervisor Protection and Translation */
read_satp(CPURISCVState * env,int csrno,target_ulong * val)2986 static RISCVException read_satp(CPURISCVState *env, int csrno,
2987 target_ulong *val)
2988 {
2989 if (!riscv_cpu_cfg(env)->mmu) {
2990 *val = 0;
2991 return RISCV_EXCP_NONE;
2992 }
2993 *val = env->satp;
2994 return RISCV_EXCP_NONE;
2995 }
2996
/*
 * Write satp.  The write is accepted only when the requested mode is
 * valid for this CPU and at least one of MODE/ASID/PPN actually changes;
 * writes are silently ignored when no MMU is configured.
 */
static RISCVException write_satp(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    target_ulong mask;
    bool vm;

    if (!riscv_cpu_cfg(env)->mmu) {
        return RISCV_EXCP_NONE;
    }

    /* mask holds the field bits that differ from the current satp. */
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        vm = validate_vm(env, get_field(val, SATP32_MODE));
        mask = (val ^ env->satp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
    } else {
        vm = validate_vm(env, get_field(val, SATP64_MODE));
        mask = (val ^ env->satp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
    }

    if (vm && mask) {
        /*
         * The ISA defines SATP.MODE=Bare as "no translation", but we still
         * pass these through QEMU's TLB emulation as it improves
         * performance.  Flushing the TLB on SATP writes with paging
         * enabled avoids leaking those invalid cached mappings.
         */
        tlb_flush(env_cpu(env));
        env->satp = val;
    }
    return RISCV_EXCP_NONE;
}
3027
/*
 * Read vstopi: report the highest-priority pending-and-enabled interrupt
 * for VS level, as {IID, IPRIO}.  Candidates come from the guest external
 * interrupt file (or hvictl injection) plus other pending VS interrupts;
 * the lowest numeric priority value wins.
 */
static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val)
{
    int irq, ret;
    target_ulong topei;
    uint64_t vseip, vsgein;
    uint32_t iid, iprio, hviid, hviprio, gein;
    uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS];

    gein = get_field(env->hstatus, HSTATUS_VGEIN);
    hviid = get_field(env->hvictl, HVICTL_IID);
    hviprio = get_field(env->hvictl, HVICTL_IPRIO);

    if (gein) {
        /* External interrupt sourced from the selected guest file. */
        vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
        vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP;
        if (gein <= env->geilen && vseip) {
            siid[scount] = IRQ_S_EXT;
            siprio[scount] = IPRIO_MMAXIPRIO + 1;
            if (env->aia_ireg_rmw_fn[PRV_S]) {
                /*
                 * Call machine specific IMSIC register emulation for
                 * reading TOPEI.
                 */
                ret = env->aia_ireg_rmw_fn[PRV_S](
                        env->aia_ireg_rmw_fn_arg[PRV_S],
                        AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein,
                                      riscv_cpu_mxl_bits(env)),
                        &topei, 0, 0);
                if (!ret && topei) {
                    siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK;
                }
            }
            scount++;
        }
    } else {
        /* No guest file selected: hvictl may inject an S_EXT candidate. */
        if (hviid == IRQ_S_EXT && hviprio) {
            siid[scount] = IRQ_S_EXT;
            siprio[scount] = hviprio;
            scount++;
        }
    }

    if (env->hvictl & HVICTL_VTI) {
        /* With VTI set, hvictl supplies the non-external candidate. */
        if (hviid != IRQ_S_EXT) {
            siid[scount] = hviid;
            siprio[scount] = hviprio;
            scount++;
        }
    } else {
        irq = riscv_cpu_vsirq_pending(env);
        if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) {
            siid[scount] = irq;
            siprio[scount] = env->hviprio[irq];
            scount++;
        }
    }

    /* Pick the candidate with the numerically lowest (best) priority. */
    iid = 0;
    iprio = UINT_MAX;
    for (s = 0; s < scount; s++) {
        if (siprio[s] < iprio) {
            iid = siid[s];
            iprio = siprio[s];
        }
    }

    if (iid) {
        if (env->hvictl & HVICTL_IPRIOM) {
            /* Clamp to the architectural priority range. */
            if (iprio > IPRIO_MMAXIPRIO) {
                iprio = IPRIO_MMAXIPRIO;
            }
            if (!iprio) {
                if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) {
                    iprio = IPRIO_MMAXIPRIO;
                }
            }
        } else {
            /* IPRIOM clear: priority always reports as 1. */
            iprio = 1;
        }
    } else {
        iprio = 0;
    }

    *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT;
    *val |= iprio;

    return RISCV_EXCP_NONE;
}
3116
/*
 * stopi: highest-priority pending S-level interrupt as (IID, IPRIO).
 * When executing in a virtualized context this CSR reflects vstopi.
 */
static int read_stopi(CPURISCVState *env, int csrno, target_ulong *val)
{
    int pending;
    uint8_t prio;

    if (env->virt_enabled) {
        return read_vstopi(env, CSR_VSTOPI, val);
    }

    pending = riscv_cpu_sirq_pending(env);
    if (pending > 0 && pending <= 63) {
        prio = env->siprio[pending];
        /* Zero priority maps to IPRIO_MMAXIPRIO for low-default irqs */
        if (!prio && riscv_cpu_default_priority(pending) > IPRIO_DEFAULT_S) {
            prio = IPRIO_MMAXIPRIO;
        }
        *val = ((pending & TOPI_IID_MASK) << TOPI_IID_SHIFT) | prio;
    } else {
        *val = 0;
    }

    return RISCV_EXCP_NONE;
}
3142
3143 /* Hypervisor Extensions */
read_hstatus(CPURISCVState * env,int csrno,target_ulong * val)3144 static RISCVException read_hstatus(CPURISCVState *env, int csrno,
3145 target_ulong *val)
3146 {
3147 *val = env->hstatus;
3148 if (riscv_cpu_mxl(env) != MXL_RV32) {
3149 /* We only support 64-bit VSXL */
3150 *val = set_field(*val, HSTATUS_VSXL, 2);
3151 }
3152 /* We only support little endian */
3153 *val = set_field(*val, HSTATUS_VSBE, 0);
3154 return RISCV_EXCP_NONE;
3155 }
3156
write_hstatus(CPURISCVState * env,int csrno,target_ulong val)3157 static RISCVException write_hstatus(CPURISCVState *env, int csrno,
3158 target_ulong val)
3159 {
3160 env->hstatus = val;
3161 if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
3162 qemu_log_mask(LOG_UNIMP,
3163 "QEMU does not support mixed HSXLEN options.");
3164 }
3165 if (get_field(val, HSTATUS_VSBE) != 0) {
3166 qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests.");
3167 }
3168 return RISCV_EXCP_NONE;
3169 }
3170
/* hedeleg: exception delegation from HS-mode to VS-mode. */
static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->hedeleg;
    return RISCV_EXCP_NONE;
}

static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    /* Only exceptions in vs_delegable_excps may be delegated to VS-mode */
    env->hedeleg = val & vs_delegable_excps;
    return RISCV_EXCP_NONE;
}
3184
rmw_hvien64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)3185 static RISCVException rmw_hvien64(CPURISCVState *env, int csrno,
3186 uint64_t *ret_val,
3187 uint64_t new_val, uint64_t wr_mask)
3188 {
3189 uint64_t mask = wr_mask & hvien_writable_mask;
3190
3191 if (ret_val) {
3192 *ret_val = env->hvien;
3193 }
3194
3195 env->hvien = (env->hvien & ~mask) | (new_val & mask);
3196
3197 return RISCV_EXCP_NONE;
3198 }
3199
rmw_hvien(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3200 static RISCVException rmw_hvien(CPURISCVState *env, int csrno,
3201 target_ulong *ret_val,
3202 target_ulong new_val, target_ulong wr_mask)
3203 {
3204 uint64_t rval;
3205 RISCVException ret;
3206
3207 ret = rmw_hvien64(env, csrno, &rval, new_val, wr_mask);
3208 if (ret_val) {
3209 *ret_val = rval;
3210 }
3211
3212 return ret;
3213 }
3214
rmw_hvienh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3215 static RISCVException rmw_hvienh(CPURISCVState *env, int csrno,
3216 target_ulong *ret_val,
3217 target_ulong new_val, target_ulong wr_mask)
3218 {
3219 uint64_t rval;
3220 RISCVException ret;
3221
3222 ret = rmw_hvien64(env, csrno, &rval,
3223 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3224 if (ret_val) {
3225 *ret_val = rval >> 32;
3226 }
3227
3228 return ret;
3229 }
3230
rmw_hideleg64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)3231 static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno,
3232 uint64_t *ret_val,
3233 uint64_t new_val, uint64_t wr_mask)
3234 {
3235 uint64_t mask = wr_mask & vs_delegable_ints;
3236
3237 if (ret_val) {
3238 *ret_val = env->hideleg & vs_delegable_ints;
3239 }
3240
3241 env->hideleg = (env->hideleg & ~mask) | (new_val & mask);
3242 return RISCV_EXCP_NONE;
3243 }
3244
rmw_hideleg(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3245 static RISCVException rmw_hideleg(CPURISCVState *env, int csrno,
3246 target_ulong *ret_val,
3247 target_ulong new_val, target_ulong wr_mask)
3248 {
3249 uint64_t rval;
3250 RISCVException ret;
3251
3252 ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask);
3253 if (ret_val) {
3254 *ret_val = rval;
3255 }
3256
3257 return ret;
3258 }
3259
rmw_hidelegh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3260 static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno,
3261 target_ulong *ret_val,
3262 target_ulong new_val, target_ulong wr_mask)
3263 {
3264 uint64_t rval;
3265 RISCVException ret;
3266
3267 ret = rmw_hideleg64(env, csrno, &rval,
3268 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3269 if (ret_val) {
3270 *ret_val = rval >> 32;
3271 }
3272
3273 return ret;
3274 }
3275
3276 /*
3277 * The function is written for two use-cases:
3278 * 1- To access hvip csr as is for HS-mode access.
3279 * 2- To access vsip as a combination of hvip, and mip for vs-mode.
3280 *
3281 * Both report bits 2, 6, 10 and 13:63.
3282 * vsip needs to be read-only zero when both hideleg[i] and
3283 * hvien[i] are zero.
3284 */
static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t old_hvip;
    uint64_t ret_mip;

    /*
     * For bits 10, 6 and 2, vsip[i] is an alias of hip[i]. These bits are
     * present in hip, hvip and mip. Where mip[i] is alias of hip[i] and hvip[i]
     * is OR'ed in hip[i] to inject virtual interrupts from hypervisor. These
     * bits are actually being maintained in mip so we read them from there.
     * This way we have a single source of truth and allows for easier
     * implementation.
     *
     * For bits 13:63 we have:
     *
     * hideleg[i]  hvien[i]
     *   0          0      No delegation. vsip[i] readonly zero.
     *   0          1      vsip[i] is alias of hvip[i], sip bypassed.
     *   1          X      vsip[i] is alias of sip[i], hvip bypassed.
     *
     * alias_mask denotes the bits that come from sip (mip here given we
     * maintain all bits there). nalias_mask denotes bits that come from
     * hvip.
     */
    uint64_t alias_mask = (env->hideleg | ~env->hvien) | VS_MODE_INTERRUPTS;
    uint64_t nalias_mask = (~env->hideleg & env->hvien);
    uint64_t wr_mask_hvip;
    uint64_t wr_mask_mip;

    /*
     * Both alias and non-alias mask remain same for vsip except:
     * 1- For VS* bits if they are zero in hideleg.
     * 2- For 13:63 bits if they are zero in both hideleg and hvien.
     */
    if (csrno == CSR_VSIP) {
        /* zero-out VS* bits that are not delegated to VS mode. */
        alias_mask &= (env->hideleg | ~VS_MODE_INTERRUPTS);

        /*
         * zero-out 13:63 bits that are zero in both hideleg and hvien.
         * nalias_mask mask can not contain any VS* bits so only second
         * condition applies on it.
         */
        nalias_mask &= (env->hideleg | env->hvien);
        alias_mask &= (env->hideleg | env->hvien);
    }

    /* Split the write: aliased bits go to mip, non-aliased bits to hvip */
    wr_mask_hvip = wr_mask & nalias_mask & hvip_writable_mask;
    wr_mask_mip = wr_mask & alias_mask & hvip_writable_mask;

    /* Aliased bits, bits 10, 6, 2 need to come from mip. */
    ret = rmw_mip64(env, csrno, &ret_mip, new_val, wr_mask_mip);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* Capture hvip before updating it: the read reflects the old value */
    old_hvip = env->hvip;

    if (wr_mask_hvip) {
        env->hvip = (env->hvip & ~wr_mask_hvip) | (new_val & wr_mask_hvip);

        /*
         * Given hvip is separate source from mip, we need to trigger interrupt
         * from here separately. Normally this happen from riscv_cpu_update_mip.
         */
        riscv_cpu_interrupt(env);
    }

    if (ret_val) {
        /* Only take VS* bits from mip. */
        ret_mip &= alias_mask;

        /* Take in non-delegated 13:63 bits from hvip. */
        old_hvip &= nalias_mask;

        *ret_val = ret_mip | old_hvip;
    }

    return ret;
}
3368
rmw_hvip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3369 static RISCVException rmw_hvip(CPURISCVState *env, int csrno,
3370 target_ulong *ret_val,
3371 target_ulong new_val, target_ulong wr_mask)
3372 {
3373 uint64_t rval;
3374 RISCVException ret;
3375
3376 ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask);
3377 if (ret_val) {
3378 *ret_val = rval;
3379 }
3380
3381 return ret;
3382 }
3383
rmw_hviph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3384 static RISCVException rmw_hviph(CPURISCVState *env, int csrno,
3385 target_ulong *ret_val,
3386 target_ulong new_val, target_ulong wr_mask)
3387 {
3388 uint64_t rval;
3389 RISCVException ret;
3390
3391 ret = rmw_hvip64(env, csrno, &rval,
3392 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3393 if (ret_val) {
3394 *ret_val = rval >> 32;
3395 }
3396
3397 return ret;
3398 }
3399
rmw_hip(CPURISCVState * env,int csrno,target_ulong * ret_value,target_ulong new_value,target_ulong write_mask)3400 static RISCVException rmw_hip(CPURISCVState *env, int csrno,
3401 target_ulong *ret_value,
3402 target_ulong new_value, target_ulong write_mask)
3403 {
3404 int ret = rmw_mip(env, csrno, ret_value, new_value,
3405 write_mask & hip_writable_mask);
3406
3407 if (ret_value) {
3408 *ret_value &= HS_MODE_INTERRUPTS;
3409 }
3410 return ret;
3411 }
3412
rmw_hie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3413 static RISCVException rmw_hie(CPURISCVState *env, int csrno,
3414 target_ulong *ret_val,
3415 target_ulong new_val, target_ulong wr_mask)
3416 {
3417 uint64_t rval;
3418 RISCVException ret;
3419
3420 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS);
3421 if (ret_val) {
3422 *ret_val = rval & HS_MODE_INTERRUPTS;
3423 }
3424
3425 return ret;
3426 }
3427
/* hcounteren: per-counter access enable for VS/VU modes. */
static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->hcounteren;
    return RISCV_EXCP_NONE;
}

static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    env->hcounteren = val;
    return RISCV_EXCP_NONE;
}
3441
read_hgeie(CPURISCVState * env,int csrno,target_ulong * val)3442 static RISCVException read_hgeie(CPURISCVState *env, int csrno,
3443 target_ulong *val)
3444 {
3445 if (val) {
3446 *val = env->hgeie;
3447 }
3448 return RISCV_EXCP_NONE;
3449 }
3450
write_hgeie(CPURISCVState * env,int csrno,target_ulong val)3451 static RISCVException write_hgeie(CPURISCVState *env, int csrno,
3452 target_ulong val)
3453 {
3454 /* Only GEILEN:1 bits implemented and BIT0 is never implemented */
3455 val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
3456 env->hgeie = val;
3457 /* Update mip.SGEIP bit */
3458 riscv_cpu_update_mip(env, MIP_SGEIP,
3459 BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
3460 return RISCV_EXCP_NONE;
3461 }
3462
/* htval: bad guest physical address / faulting value for HS-level traps. */
static RISCVException read_htval(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->htval;
    return RISCV_EXCP_NONE;
}

static RISCVException write_htval(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->htval = val;
    return RISCV_EXCP_NONE;
}
3476
/* htinst: transformed instruction for HS-level traps. */
static RISCVException read_htinst(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->htinst;
    return RISCV_EXCP_NONE;
}

/* Software writes to htinst are silently discarded. */
static RISCVException write_htinst(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    return RISCV_EXCP_NONE;
}

/* hgeip: read-only pending guest external interrupts. */
static RISCVException read_hgeip(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    if (val) {
        *val = env->hgeip;
    }
    return RISCV_EXCP_NONE;
}
3498
/* hgatp: guest address translation and protection (G-stage root). */
static RISCVException read_hgatp(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->hgatp;
    return RISCV_EXCP_NONE;
}

static RISCVException write_hgatp(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->hgatp = val;
    return RISCV_EXCP_NONE;
}
3512
read_htimedelta(CPURISCVState * env,int csrno,target_ulong * val)3513 static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
3514 target_ulong *val)
3515 {
3516 if (!env->rdtime_fn) {
3517 return RISCV_EXCP_ILLEGAL_INST;
3518 }
3519
3520 *val = env->htimedelta;
3521 return RISCV_EXCP_NONE;
3522 }
3523
write_htimedelta(CPURISCVState * env,int csrno,target_ulong val)3524 static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
3525 target_ulong val)
3526 {
3527 if (!env->rdtime_fn) {
3528 return RISCV_EXCP_ILLEGAL_INST;
3529 }
3530
3531 if (riscv_cpu_mxl(env) == MXL_RV32) {
3532 env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
3533 } else {
3534 env->htimedelta = val;
3535 }
3536
3537 if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) {
3538 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
3539 env->htimedelta, MIP_VSTIP);
3540 }
3541
3542 return RISCV_EXCP_NONE;
3543 }
3544
read_htimedeltah(CPURISCVState * env,int csrno,target_ulong * val)3545 static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
3546 target_ulong *val)
3547 {
3548 if (!env->rdtime_fn) {
3549 return RISCV_EXCP_ILLEGAL_INST;
3550 }
3551
3552 *val = env->htimedelta >> 32;
3553 return RISCV_EXCP_NONE;
3554 }
3555
write_htimedeltah(CPURISCVState * env,int csrno,target_ulong val)3556 static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
3557 target_ulong val)
3558 {
3559 if (!env->rdtime_fn) {
3560 return RISCV_EXCP_ILLEGAL_INST;
3561 }
3562
3563 env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);
3564
3565 if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) {
3566 riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
3567 env->htimedelta, MIP_VSTIP);
3568 }
3569
3570 return RISCV_EXCP_NONE;
3571 }
3572
/* hvictl: hypervisor virtual interrupt control (AIA). */
static int read_hvictl(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = env->hvictl;
    return RISCV_EXCP_NONE;
}

static int write_hvictl(CPURISCVState *env, int csrno, target_ulong val)
{
    /* Discard bits outside the architecturally valid hvictl fields */
    env->hvictl = val & HVICTL_VALID_MASK;
    return RISCV_EXCP_NONE;
}
3584
/*
 * Pack one hviprio-style register from the iprio[] byte array, starting
 * at irq index first_index. Each register holds 4 (RV32) or 8 (RV64)
 * one-byte priorities.
 */
static int read_hvipriox(CPURISCVState *env, int first_index,
                         uint8_t *iprio, target_ulong *val)
{
    int slot, irq, rdzero;
    int irqs_per_reg = 4 * (riscv_cpu_mxl_bits(env) / 32);
    target_ulong packed = 0;

    /* The index must be aligned to the number of irqs packed per register */
    if (first_index % irqs_per_reg != 0) {
        return env->virt_enabled ? RISCV_EXCP_VIRT_INSTRUCTION_FAULT
                                 : RISCV_EXCP_ILLEGAL_INST;
    }

    for (slot = 0; slot < irqs_per_reg; slot++) {
        if (riscv_cpu_hviprio_index2irq(first_index + slot, &irq, &rdzero) ||
            rdzero) {
            /* Unmapped or read-zero slots contribute nothing */
            continue;
        }
        packed |= ((target_ulong)iprio[irq]) << (slot * 8);
    }
    *val = packed;

    return RISCV_EXCP_NONE;
}
3610
/*
 * Unpack one hviprio-style register into the iprio[] byte array, starting
 * at irq index first_index. Read-zero slots force the priority to 0.
 */
static int write_hvipriox(CPURISCVState *env, int first_index,
                          uint8_t *iprio, target_ulong val)
{
    int slot, irq, rdzero;
    int irqs_per_reg = 4 * (riscv_cpu_mxl_bits(env) / 32);

    /* The index must be aligned to the number of irqs packed per register */
    if (first_index % irqs_per_reg != 0) {
        return env->virt_enabled ? RISCV_EXCP_VIRT_INSTRUCTION_FAULT
                                 : RISCV_EXCP_ILLEGAL_INST;
    }

    for (slot = 0; slot < irqs_per_reg; slot++) {
        if (riscv_cpu_hviprio_index2irq(first_index + slot, &irq, &rdzero)) {
            continue;
        }
        iprio[irq] = rdzero ? 0 : ((val >> (slot * 8)) & 0xff);
    }

    return RISCV_EXCP_NONE;
}
3636
/*
 * hviprio1/1h/2/2h accessors: each CSR window covers a fixed slice of the
 * hviprio[] priority array; the first-index constant (0/4/8/12) selects
 * which slice (the "h" variants exist for RV32).
 */
static int read_hviprio1(CPURISCVState *env, int csrno, target_ulong *val)
{
    return read_hvipriox(env, 0, env->hviprio, val);
}

static int write_hviprio1(CPURISCVState *env, int csrno, target_ulong val)
{
    return write_hvipriox(env, 0, env->hviprio, val);
}

static int read_hviprio1h(CPURISCVState *env, int csrno, target_ulong *val)
{
    return read_hvipriox(env, 4, env->hviprio, val);
}

static int write_hviprio1h(CPURISCVState *env, int csrno, target_ulong val)
{
    return write_hvipriox(env, 4, env->hviprio, val);
}

static int read_hviprio2(CPURISCVState *env, int csrno, target_ulong *val)
{
    return read_hvipriox(env, 8, env->hviprio, val);
}

static int write_hviprio2(CPURISCVState *env, int csrno, target_ulong val)
{
    return write_hvipriox(env, 8, env->hviprio, val);
}

static int read_hviprio2h(CPURISCVState *env, int csrno, target_ulong *val)
{
    return read_hvipriox(env, 12, env->hviprio, val);
}

static int write_hviprio2h(CPURISCVState *env, int csrno, target_ulong val)
{
    return write_hvipriox(env, 12, env->hviprio, val);
}
3676
3677 /* Virtual CSR Registers */
read_vsstatus(CPURISCVState * env,int csrno,target_ulong * val)3678 static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
3679 target_ulong *val)
3680 {
3681 *val = env->vsstatus;
3682 return RISCV_EXCP_NONE;
3683 }
3684
write_vsstatus(CPURISCVState * env,int csrno,target_ulong val)3685 static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
3686 target_ulong val)
3687 {
3688 uint64_t mask = (target_ulong)-1;
3689 if ((val & VSSTATUS64_UXL) == 0) {
3690 mask &= ~VSSTATUS64_UXL;
3691 }
3692 env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
3693 return RISCV_EXCP_NONE;
3694 }
3695
/* Plain accessors for the remaining virtual supervisor CSRs. */
static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = env->vstvec;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vstvec(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->vstvec = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->vsscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    env->vsscratch = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vsepc(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vsepc;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsepc(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->vsepc = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vscause(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->vscause;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vscause(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    env->vscause = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vstval(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstval;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vstval(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->vstval = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vsatp(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vsatp;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsatp(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->vsatp = val;
    return RISCV_EXCP_NONE;
}
3778
/* mtval2/mtinst: machine trap values for guest-page faults (H extension). */
static RISCVException read_mtval2(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mtval2;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtval2(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->mtval2 = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mtinst(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mtinst;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtinst(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->mtinst = val;
    return RISCV_EXCP_NONE;
}
3806
3807 /* Physical Memory Protection */
/* Physical Memory Protection */
/* All PMP state lives in pmp.c; these thin wrappers route CSR accesses. */
static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = mseccfg_csr_read(env);
    return RISCV_EXCP_NONE;
}

static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    mseccfg_csr_write(env, val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    /* CSR number encodes the pmpcfg register index */
    uint32_t reg_index = csrno - CSR_PMPCFG0;

    *val = pmpcfg_csr_read(env, reg_index);
    return RISCV_EXCP_NONE;
}

static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    uint32_t reg_index = csrno - CSR_PMPCFG0;

    pmpcfg_csr_write(env, reg_index, val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
    return RISCV_EXCP_NONE;
}

static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
    return RISCV_EXCP_NONE;
}
3853
/* tselect: debug trigger selection, delegated to debug.c helpers. */
static RISCVException read_tselect(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = tselect_csr_read(env);
    return RISCV_EXCP_NONE;
}

static RISCVException write_tselect(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    tselect_csr_write(env, val);
    return RISCV_EXCP_NONE;
}
3867
read_tdata(CPURISCVState * env,int csrno,target_ulong * val)3868 static RISCVException read_tdata(CPURISCVState *env, int csrno,
3869 target_ulong *val)
3870 {
3871 /* return 0 in tdata1 to end the trigger enumeration */
3872 if (env->trigger_cur >= RV_MAX_TRIGGERS && csrno == CSR_TDATA1) {
3873 *val = 0;
3874 return RISCV_EXCP_NONE;
3875 }
3876
3877 if (!tdata_available(env, csrno - CSR_TDATA1)) {
3878 return RISCV_EXCP_ILLEGAL_INST;
3879 }
3880
3881 *val = tdata_csr_read(env, csrno - CSR_TDATA1);
3882 return RISCV_EXCP_NONE;
3883 }
3884
write_tdata(CPURISCVState * env,int csrno,target_ulong val)3885 static RISCVException write_tdata(CPURISCVState *env, int csrno,
3886 target_ulong val)
3887 {
3888 if (!tdata_available(env, csrno - CSR_TDATA1)) {
3889 return RISCV_EXCP_ILLEGAL_INST;
3890 }
3891
3892 tdata_csr_write(env, csrno - CSR_TDATA1, val);
3893 return RISCV_EXCP_NONE;
3894 }
3895
/* tinfo: read-only capabilities of the selected debug trigger. */
static RISCVException read_tinfo(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = tinfo_csr_read(env);
    return RISCV_EXCP_NONE;
}
3902
3903 /*
3904 * Functions to access Pointer Masking feature registers
3905 * We have to check if current priv lvl could modify
3906 * csr in given mode
3907 */
check_pm_current_disabled(CPURISCVState * env,int csrno)3908 static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
3909 {
3910 int csr_priv = get_field(csrno, 0x300);
3911 int pm_current;
3912
3913 if (env->debugger) {
3914 return false;
3915 }
3916 /*
3917 * If priv lvls differ that means we're accessing csr from higher priv lvl,
3918 * so allow the access
3919 */
3920 if (env->priv != csr_priv) {
3921 return false;
3922 }
3923 switch (env->priv) {
3924 case PRV_M:
3925 pm_current = get_field(env->mmte, M_PM_CURRENT);
3926 break;
3927 case PRV_S:
3928 pm_current = get_field(env->mmte, S_PM_CURRENT);
3929 break;
3930 case PRV_U:
3931 pm_current = get_field(env->mmte, U_PM_CURRENT);
3932 break;
3933 default:
3934 g_assert_not_reached();
3935 }
3936 /* It's same priv lvl, so we allow to modify csr only if pm.current==1 */
3937 return !pm_current;
3938 }
3939
read_mmte(CPURISCVState * env,int csrno,target_ulong * val)3940 static RISCVException read_mmte(CPURISCVState *env, int csrno,
3941 target_ulong *val)
3942 {
3943 *val = env->mmte & MMTE_MASK;
3944 return RISCV_EXCP_NONE;
3945 }
3946
write_mmte(CPURISCVState * env,int csrno,target_ulong val)3947 static RISCVException write_mmte(CPURISCVState *env, int csrno,
3948 target_ulong val)
3949 {
3950 uint64_t mstatus;
3951 target_ulong wpri_val = val & MMTE_MASK;
3952
3953 if (val != wpri_val) {
3954 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s"
3955 TARGET_FMT_lx "\n", "MMTE: WPRI violation written 0x",
3956 val, "vs expected 0x", wpri_val);
3957 }
3958 /* for machine mode pm.current is hardwired to 1 */
3959 wpri_val |= MMTE_M_PM_CURRENT;
3960
3961 /* hardwiring pm.instruction bit to 0, since it's not supported yet */
3962 wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
3963 env->mmte = wpri_val | EXT_STATUS_DIRTY;
3964 riscv_cpu_update_mask(env);
3965
3966 /* Set XS and SD bits, since PM CSRs are dirty */
3967 mstatus = env->mstatus | MSTATUS_XS;
3968 write_mstatus(env, csrno, mstatus);
3969 return RISCV_EXCP_NONE;
3970 }
3971
read_smte(CPURISCVState * env,int csrno,target_ulong * val)3972 static RISCVException read_smte(CPURISCVState *env, int csrno,
3973 target_ulong *val)
3974 {
3975 *val = env->mmte & SMTE_MASK;
3976 return RISCV_EXCP_NONE;
3977 }
3978
write_smte(CPURISCVState * env,int csrno,target_ulong val)3979 static RISCVException write_smte(CPURISCVState *env, int csrno,
3980 target_ulong val)
3981 {
3982 target_ulong wpri_val = val & SMTE_MASK;
3983
3984 if (val != wpri_val) {
3985 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s"
3986 TARGET_FMT_lx "\n", "SMTE: WPRI violation written 0x",
3987 val, "vs expected 0x", wpri_val);
3988 }
3989
3990 /* if pm.current==0 we can't modify current PM CSRs */
3991 if (check_pm_current_disabled(env, csrno)) {
3992 return RISCV_EXCP_NONE;
3993 }
3994
3995 wpri_val |= (env->mmte & ~SMTE_MASK);
3996 write_mmte(env, csrno, wpri_val);
3997 return RISCV_EXCP_NONE;
3998 }
3999
read_umte(CPURISCVState * env,int csrno,target_ulong * val)4000 static RISCVException read_umte(CPURISCVState *env, int csrno,
4001 target_ulong *val)
4002 {
4003 *val = env->mmte & UMTE_MASK;
4004 return RISCV_EXCP_NONE;
4005 }
4006
write_umte(CPURISCVState * env,int csrno,target_ulong val)4007 static RISCVException write_umte(CPURISCVState *env, int csrno,
4008 target_ulong val)
4009 {
4010 target_ulong wpri_val = val & UMTE_MASK;
4011
4012 if (val != wpri_val) {
4013 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s"
4014 TARGET_FMT_lx "\n", "UMTE: WPRI violation written 0x",
4015 val, "vs expected 0x", wpri_val);
4016 }
4017
4018 if (check_pm_current_disabled(env, csrno)) {
4019 return RISCV_EXCP_NONE;
4020 }
4021
4022 wpri_val |= (env->mmte & ~UMTE_MASK);
4023 write_mmte(env, csrno, wpri_val);
4024 return RISCV_EXCP_NONE;
4025 }
4026
read_mpmmask(CPURISCVState * env,int csrno,target_ulong * val)4027 static RISCVException read_mpmmask(CPURISCVState *env, int csrno,
4028 target_ulong *val)
4029 {
4030 *val = env->mpmmask;
4031 return RISCV_EXCP_NONE;
4032 }
4033
write_mpmmask(CPURISCVState * env,int csrno,target_ulong val)4034 static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
4035 target_ulong val)
4036 {
4037 uint64_t mstatus;
4038
4039 env->mpmmask = val;
4040 if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
4041 env->cur_pmmask = val;
4042 }
4043 env->mmte |= EXT_STATUS_DIRTY;
4044
4045 /* Set XS and SD bits, since PM CSRs are dirty */
4046 mstatus = env->mstatus | MSTATUS_XS;
4047 write_mstatus(env, csrno, mstatus);
4048 return RISCV_EXCP_NONE;
4049 }
4050
read_spmmask(CPURISCVState * env,int csrno,target_ulong * val)4051 static RISCVException read_spmmask(CPURISCVState *env, int csrno,
4052 target_ulong *val)
4053 {
4054 *val = env->spmmask;
4055 return RISCV_EXCP_NONE;
4056 }
4057
write_spmmask(CPURISCVState * env,int csrno,target_ulong val)4058 static RISCVException write_spmmask(CPURISCVState *env, int csrno,
4059 target_ulong val)
4060 {
4061 uint64_t mstatus;
4062
4063 /* if pm.current==0 we can't modify current PM CSRs */
4064 if (check_pm_current_disabled(env, csrno)) {
4065 return RISCV_EXCP_NONE;
4066 }
4067 env->spmmask = val;
4068 if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
4069 env->cur_pmmask = val;
4070 if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
4071 env->cur_pmmask &= UINT32_MAX;
4072 }
4073 }
4074 env->mmte |= EXT_STATUS_DIRTY;
4075
4076 /* Set XS and SD bits, since PM CSRs are dirty */
4077 mstatus = env->mstatus | MSTATUS_XS;
4078 write_mstatus(env, csrno, mstatus);
4079 return RISCV_EXCP_NONE;
4080 }
4081
read_upmmask(CPURISCVState * env,int csrno,target_ulong * val)4082 static RISCVException read_upmmask(CPURISCVState *env, int csrno,
4083 target_ulong *val)
4084 {
4085 *val = env->upmmask;
4086 return RISCV_EXCP_NONE;
4087 }
4088
write_upmmask(CPURISCVState * env,int csrno,target_ulong val)4089 static RISCVException write_upmmask(CPURISCVState *env, int csrno,
4090 target_ulong val)
4091 {
4092 uint64_t mstatus;
4093
4094 /* if pm.current==0 we can't modify current PM CSRs */
4095 if (check_pm_current_disabled(env, csrno)) {
4096 return RISCV_EXCP_NONE;
4097 }
4098 env->upmmask = val;
4099 if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
4100 env->cur_pmmask = val;
4101 if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
4102 env->cur_pmmask &= UINT32_MAX;
4103 }
4104 }
4105 env->mmte |= EXT_STATUS_DIRTY;
4106
4107 /* Set XS and SD bits, since PM CSRs are dirty */
4108 mstatus = env->mstatus | MSTATUS_XS;
4109 write_mstatus(env, csrno, mstatus);
4110 return RISCV_EXCP_NONE;
4111 }
4112
read_mpmbase(CPURISCVState * env,int csrno,target_ulong * val)4113 static RISCVException read_mpmbase(CPURISCVState *env, int csrno,
4114 target_ulong *val)
4115 {
4116 *val = env->mpmbase;
4117 return RISCV_EXCP_NONE;
4118 }
4119
write_mpmbase(CPURISCVState * env,int csrno,target_ulong val)4120 static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
4121 target_ulong val)
4122 {
4123 uint64_t mstatus;
4124
4125 env->mpmbase = val;
4126 if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
4127 env->cur_pmbase = val;
4128 }
4129 env->mmte |= EXT_STATUS_DIRTY;
4130
4131 /* Set XS and SD bits, since PM CSRs are dirty */
4132 mstatus = env->mstatus | MSTATUS_XS;
4133 write_mstatus(env, csrno, mstatus);
4134 return RISCV_EXCP_NONE;
4135 }
4136
read_spmbase(CPURISCVState * env,int csrno,target_ulong * val)4137 static RISCVException read_spmbase(CPURISCVState *env, int csrno,
4138 target_ulong *val)
4139 {
4140 *val = env->spmbase;
4141 return RISCV_EXCP_NONE;
4142 }
4143
write_spmbase(CPURISCVState * env,int csrno,target_ulong val)4144 static RISCVException write_spmbase(CPURISCVState *env, int csrno,
4145 target_ulong val)
4146 {
4147 uint64_t mstatus;
4148
4149 /* if pm.current==0 we can't modify current PM CSRs */
4150 if (check_pm_current_disabled(env, csrno)) {
4151 return RISCV_EXCP_NONE;
4152 }
4153 env->spmbase = val;
4154 if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
4155 env->cur_pmbase = val;
4156 if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
4157 env->cur_pmbase &= UINT32_MAX;
4158 }
4159 }
4160 env->mmte |= EXT_STATUS_DIRTY;
4161
4162 /* Set XS and SD bits, since PM CSRs are dirty */
4163 mstatus = env->mstatus | MSTATUS_XS;
4164 write_mstatus(env, csrno, mstatus);
4165 return RISCV_EXCP_NONE;
4166 }
4167
read_upmbase(CPURISCVState * env,int csrno,target_ulong * val)4168 static RISCVException read_upmbase(CPURISCVState *env, int csrno,
4169 target_ulong *val)
4170 {
4171 *val = env->upmbase;
4172 return RISCV_EXCP_NONE;
4173 }
4174
write_upmbase(CPURISCVState * env,int csrno,target_ulong val)4175 static RISCVException write_upmbase(CPURISCVState *env, int csrno,
4176 target_ulong val)
4177 {
4178 uint64_t mstatus;
4179
4180 /* if pm.current==0 we can't modify current PM CSRs */
4181 if (check_pm_current_disabled(env, csrno)) {
4182 return RISCV_EXCP_NONE;
4183 }
4184 env->upmbase = val;
4185 if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
4186 env->cur_pmbase = val;
4187 if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
4188 env->cur_pmbase &= UINT32_MAX;
4189 }
4190 }
4191 env->mmte |= EXT_STATUS_DIRTY;
4192
4193 /* Set XS and SD bits, since PM CSRs are dirty */
4194 mstatus = env->mstatus | MSTATUS_XS;
4195 write_mstatus(env, csrno, mstatus);
4196 return RISCV_EXCP_NONE;
4197 }
4198
4199 #endif
4200
4201 /* Crypto Extension */
rmw_seed(CPURISCVState * env,int csrno,target_ulong * ret_value,target_ulong new_value,target_ulong write_mask)4202 static RISCVException rmw_seed(CPURISCVState *env, int csrno,
4203 target_ulong *ret_value,
4204 target_ulong new_value,
4205 target_ulong write_mask)
4206 {
4207 uint16_t random_v;
4208 Error *random_e = NULL;
4209 int random_r;
4210 target_ulong rval;
4211
4212 random_r = qemu_guest_getrandom(&random_v, 2, &random_e);
4213 if (unlikely(random_r < 0)) {
4214 /*
4215 * Failed, for unknown reasons in the crypto subsystem.
4216 * The best we can do is log the reason and return a
4217 * failure indication to the guest. There is no reason
4218 * we know to expect the failure to be transitory, so
4219 * indicate DEAD to avoid having the guest spin on WAIT.
4220 */
4221 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
4222 __func__, error_get_pretty(random_e));
4223 error_free(random_e);
4224 rval = SEED_OPST_DEAD;
4225 } else {
4226 rval = random_v | SEED_OPST_ES16;
4227 }
4228
4229 if (ret_value) {
4230 *ret_value = rval;
4231 }
4232
4233 return RISCV_EXCP_NONE;
4234 }
4235
4236 /*
4237 * riscv_csrrw - read and/or update control and status register
4238 *
4239 * csrr <-> riscv_csrrw(env, csrno, ret_value, 0, 0);
4240 * csrrw <-> riscv_csrrw(env, csrno, ret_value, value, -1);
4241 * csrrs <-> riscv_csrrw(env, csrno, ret_value, -1, value);
4242 * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value);
4243 */
4244
/*
 * Validate a CSR access before it is performed.
 *
 * The ordering of the checks below is deliberate: extension presence,
 * implementation, spec version, and the read-only/write check all come
 * before the per-CSR predicate, and the privilege check comes last.
 * Returns RISCV_EXCP_NONE on success or the exception to raise.
 */
static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
                                               int csrno,
                                               bool write_mask)
{
    /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
    /* CSR numbers with bits [11:10] == 0b11 are architecturally read-only */
    bool read_only = get_field(csrno, 0xC00) == 3;
    int csr_min_priv = csr_ops[csrno].min_priv_ver;

    /* ensure the CSR extension is enabled */
    if (!riscv_cpu_cfg(env)->ext_zicsr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* ensure CSR is implemented by checking predicate */
    if (!csr_ops[csrno].predicate) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* privileged spec version check */
    if (env->priv_ver < csr_min_priv) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* read / write check */
    if (write_mask && read_only) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /*
     * The predicate() not only does existence check but also does some
     * access control check which triggers for example virtual instruction
     * exception in some cases. When writing read-only CSRs in those cases
     * illegal instruction exception should be triggered instead of virtual
     * instruction exception. Hence this comes after the read / write check.
     */
    RISCVException ret = csr_ops[csrno].predicate(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

#if !defined(CONFIG_USER_ONLY)
    int csr_priv, effective_priv = env->priv;

    if (riscv_has_ext(env, RVH) && env->priv == PRV_S &&
        !env->virt_enabled) {
        /*
         * We are in HS mode. Add 1 to the effective privilege level to
         * allow us to access the Hypervisor CSRs.
         */
        effective_priv++;
    }

    /* CSR bits [9:8] encode the minimum privilege needed to access it */
    csr_priv = get_field(csrno, 0x300);
    if (!env->debugger && (effective_priv < csr_priv)) {
        /* VS mode touching an HS-level CSR raises a virtual fault */
        if (csr_priv == (PRV_S + 1) && env->virt_enabled) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        return RISCV_EXCP_ILLEGAL_INST;
    }
#endif
    return RISCV_EXCP_NONE;
}
4307
/*
 * Perform a (pre-validated) 64/32-bit CSR read-modify-write.
 *
 * Dispatch order matters: a combined .op accessor, when present, fully
 * replaces the separate read/write path. Otherwise the old value is read
 * (only when the caller wants it back), the new value is merged under
 * write_mask, and the result is written via the .write accessor if any.
 */
static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
                                       target_ulong *ret_value,
                                       target_ulong new_value,
                                       target_ulong write_mask)
{
    RISCVException ret;
    target_ulong old_value = 0;

    /* execute combined read/write operation if it exists */
    if (csr_ops[csrno].op) {
        return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
    }

    /*
     * ret_value == NULL means that rd=x0 and we're coming from helper_csrw()
     * and we can't throw side effects caused by CSR reads.
     */
    if (ret_value) {
        /* if no accessor exists then return failure */
        if (!csr_ops[csrno].read) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
        /* read old value */
        ret = csr_ops[csrno].read(env, csrno, &old_value);
        if (ret != RISCV_EXCP_NONE) {
            return ret;
        }
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (write_mask) {
        /* keep unmasked bits of the old value; note old_value is 0 if
         * the read was skipped above (rd == x0) */
        new_value = (old_value & ~write_mask) | (new_value & write_mask);
        if (csr_ops[csrno].write) {
            ret = csr_ops[csrno].write(env, csrno, new_value);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}
4355
riscv_csrrw(CPURISCVState * env,int csrno,target_ulong * ret_value,target_ulong new_value,target_ulong write_mask)4356 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
4357 target_ulong *ret_value,
4358 target_ulong new_value, target_ulong write_mask)
4359 {
4360 RISCVException ret = riscv_csrrw_check(env, csrno, write_mask);
4361 if (ret != RISCV_EXCP_NONE) {
4362 return ret;
4363 }
4364
4365 return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
4366 }
4367
/*
 * Perform a (pre-validated) 128-bit CSR read-modify-write.
 *
 * Requires a .read128 accessor (the caller checks this). Writes prefer
 * .write128; when only the 64-bit .write exists, the low half of the
 * merged value is written through it instead.
 */
static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
                                        Int128 *ret_value,
                                        Int128 new_value,
                                        Int128 write_mask)
{
    RISCVException ret;
    Int128 old_value;

    /* read old value */
    ret = csr_ops[csrno].read128(env, csrno, &old_value);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (int128_nz(write_mask)) {
        /* merge new bits into the old value under write_mask */
        new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
                              int128_and(new_value, write_mask));
        if (csr_ops[csrno].write128) {
            ret = csr_ops[csrno].write128(env, csrno, new_value);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        } else if (csr_ops[csrno].write) {
            /* avoids having to write wrappers for all registers */
            ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}
4407
/*
 * 128-bit public CSR read-modify-write entry point.
 *
 * Validates the access, then uses the 128-bit path when the CSR provides
 * one, falling back to the 64-bit path (zero-extending the result) for
 * CSRs whose upper 64 bits are not significant.
 */
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value,
                                Int128 new_value, Int128 write_mask)
{
    RISCVException ret;

    ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask));
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (csr_ops[csrno].read128) {
        return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
    }

    /*
     * Fall back to 64-bit version for now, if the 128-bit alternative isn't
     * at all defined.
     * Note, some CSRs don't need to extend to MXLEN (64 upper bits non
     * significant), for those, this fallback is correctly handling the
     * accesses
     */
    target_ulong old_value;
    ret = riscv_csrrw_do64(env, csrno, &old_value,
                           int128_getlo(new_value),
                           int128_getlo(write_mask));
    if (ret == RISCV_EXCP_NONE && ret_value) {
        /* zero-extend the 64-bit result into the 128-bit return value */
        *ret_value = int128_make64(old_value);
    }
    return ret;
}
4439
4440 /*
4441 * Debugger support. If not in user mode, set env->debugger before the
4442 * riscv_csrrw call and clear it after the call.
4443 */
riscv_csrrw_debug(CPURISCVState * env,int csrno,target_ulong * ret_value,target_ulong new_value,target_ulong write_mask)4444 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
4445 target_ulong *ret_value,
4446 target_ulong new_value,
4447 target_ulong write_mask)
4448 {
4449 RISCVException ret;
4450 #if !defined(CONFIG_USER_ONLY)
4451 env->debugger = true;
4452 #endif
4453 ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
4454 #if !defined(CONFIG_USER_ONLY)
4455 env->debugger = false;
4456 #endif
4457 return ret;
4458 }
4459
read_jvt(CPURISCVState * env,int csrno,target_ulong * val)4460 static RISCVException read_jvt(CPURISCVState *env, int csrno,
4461 target_ulong *val)
4462 {
4463 *val = env->jvt;
4464 return RISCV_EXCP_NONE;
4465 }
4466
write_jvt(CPURISCVState * env,int csrno,target_ulong val)4467 static RISCVException write_jvt(CPURISCVState *env, int csrno,
4468 target_ulong val)
4469 {
4470 env->jvt = val;
4471 return RISCV_EXCP_NONE;
4472 }
4473
4474 /*
4475 * Control and Status Register function table
4476 * riscv_csr_operations::predicate() must be provided for an implemented CSR
4477 */
4478 riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
4479 /* User Floating-Point CSRs */
4480 [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
4481 [CSR_FRM] = { "frm", fs, read_frm, write_frm },
4482 [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr },
4483 /* Vector CSRs */
4484 [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart },
4485 [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat },
4486 [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm },
4487 [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr },
4488 [CSR_VL] = { "vl", vs, read_vl },
4489 [CSR_VTYPE] = { "vtype", vs, read_vtype },
4490 [CSR_VLENB] = { "vlenb", vs, read_vlenb },
4491 /* User Timers and Counters */
4492 [CSR_CYCLE] = { "cycle", ctr, read_hpmcounter },
4493 [CSR_INSTRET] = { "instret", ctr, read_hpmcounter },
4494 [CSR_CYCLEH] = { "cycleh", ctr32, read_hpmcounterh },
4495 [CSR_INSTRETH] = { "instreth", ctr32, read_hpmcounterh },
4496
4497 /*
4498 * In privileged mode, the monitor will have to emulate TIME CSRs only if
4499 * rdtime callback is not provided by machine/platform emulation.
4500 */
4501 [CSR_TIME] = { "time", ctr, read_time },
4502 [CSR_TIMEH] = { "timeh", ctr32, read_timeh },
4503
4504 /* Crypto Extension */
4505 [CSR_SEED] = { "seed", seed, NULL, NULL, rmw_seed },
4506
4507 /* Zcmt Extension */
4508 [CSR_JVT] = {"jvt", zcmt, read_jvt, write_jvt},
4509
4510 #if !defined(CONFIG_USER_ONLY)
4511 /* Machine Timers and Counters */
4512 [CSR_MCYCLE] = { "mcycle", any, read_hpmcounter,
4513 write_mhpmcounter },
4514 [CSR_MINSTRET] = { "minstret", any, read_hpmcounter,
4515 write_mhpmcounter },
4516 [CSR_MCYCLEH] = { "mcycleh", any32, read_hpmcounterh,
4517 write_mhpmcounterh },
4518 [CSR_MINSTRETH] = { "minstreth", any32, read_hpmcounterh,
4519 write_mhpmcounterh },
4520
4521 /* Machine Information Registers */
4522 [CSR_MVENDORID] = { "mvendorid", any, read_mvendorid },
4523 [CSR_MARCHID] = { "marchid", any, read_marchid },
4524 [CSR_MIMPID] = { "mimpid", any, read_mimpid },
4525 [CSR_MHARTID] = { "mhartid", any, read_mhartid },
4526
4527 [CSR_MCONFIGPTR] = { "mconfigptr", any, read_zero,
4528 .min_priv_ver = PRIV_VERSION_1_12_0 },
4529 /* Machine Trap Setup */
4530 [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus,
4531 NULL, read_mstatus_i128 },
4532 [CSR_MISA] = { "misa", any, read_misa, write_misa,
4533 NULL, read_misa_i128 },
4534 [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg },
4535 [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg },
4536 [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie },
4537 [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec },
4538 [CSR_MCOUNTEREN] = { "mcounteren", umode, read_mcounteren,
4539 write_mcounteren },
4540
4541 [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush,
4542 write_mstatush },
4543
4544 /* Machine Trap Handling */
4545 [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch,
4546 NULL, read_mscratch_i128, write_mscratch_i128 },
4547 [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc },
4548 [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause },
4549 [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval },
4550 [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
4551
4552 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */
4553 [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect },
4554 [CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg },
4555
4556 /* Machine-Level Interrupts (AIA) */
4557 [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
4558 [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },
4559
4560 /* Virtual Interrupts for Supervisor Level (AIA) */
4561 [CSR_MVIEN] = { "mvien", aia_any, NULL, NULL, rmw_mvien },
4562 [CSR_MVIP] = { "mvip", aia_any, NULL, NULL, rmw_mvip },
4563
4564 /* Machine-Level High-Half CSRs (AIA) */
4565 [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh },
4566 [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh },
4567 [CSR_MVIENH] = { "mvienh", aia_any32, NULL, NULL, rmw_mvienh },
4568 [CSR_MVIPH] = { "mviph", aia_any32, NULL, NULL, rmw_mviph },
4569 [CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph },
4570
4571 /* Execution environment configuration */
4572 [CSR_MENVCFG] = { "menvcfg", umode, read_menvcfg, write_menvcfg,
4573 .min_priv_ver = PRIV_VERSION_1_12_0 },
4574 [CSR_MENVCFGH] = { "menvcfgh", umode32, read_menvcfgh, write_menvcfgh,
4575 .min_priv_ver = PRIV_VERSION_1_12_0 },
4576 [CSR_SENVCFG] = { "senvcfg", smode, read_senvcfg, write_senvcfg,
4577 .min_priv_ver = PRIV_VERSION_1_12_0 },
4578 [CSR_HENVCFG] = { "henvcfg", hmode, read_henvcfg, write_henvcfg,
4579 .min_priv_ver = PRIV_VERSION_1_12_0 },
4580 [CSR_HENVCFGH] = { "henvcfgh", hmode32, read_henvcfgh, write_henvcfgh,
4581 .min_priv_ver = PRIV_VERSION_1_12_0 },
4582
4583 /* Smstateen extension CSRs */
4584 [CSR_MSTATEEN0] = { "mstateen0", mstateen, read_mstateen, write_mstateen0,
4585 .min_priv_ver = PRIV_VERSION_1_12_0 },
4586 [CSR_MSTATEEN0H] = { "mstateen0h", mstateen, read_mstateenh,
4587 write_mstateen0h,
4588 .min_priv_ver = PRIV_VERSION_1_12_0 },
4589 [CSR_MSTATEEN1] = { "mstateen1", mstateen, read_mstateen,
4590 write_mstateen_1_3,
4591 .min_priv_ver = PRIV_VERSION_1_12_0 },
4592 [CSR_MSTATEEN1H] = { "mstateen1h", mstateen, read_mstateenh,
4593 write_mstateenh_1_3,
4594 .min_priv_ver = PRIV_VERSION_1_12_0 },
4595 [CSR_MSTATEEN2] = { "mstateen2", mstateen, read_mstateen,
4596 write_mstateen_1_3,
4597 .min_priv_ver = PRIV_VERSION_1_12_0 },
4598 [CSR_MSTATEEN2H] = { "mstateen2h", mstateen, read_mstateenh,
4599 write_mstateenh_1_3,
4600 .min_priv_ver = PRIV_VERSION_1_12_0 },
4601 [CSR_MSTATEEN3] = { "mstateen3", mstateen, read_mstateen,
4602 write_mstateen_1_3,
4603 .min_priv_ver = PRIV_VERSION_1_12_0 },
4604 [CSR_MSTATEEN3H] = { "mstateen3h", mstateen, read_mstateenh,
4605 write_mstateenh_1_3,
4606 .min_priv_ver = PRIV_VERSION_1_12_0 },
4607 [CSR_HSTATEEN0] = { "hstateen0", hstateen, read_hstateen, write_hstateen0,
4608 .min_priv_ver = PRIV_VERSION_1_12_0 },
4609 [CSR_HSTATEEN0H] = { "hstateen0h", hstateenh, read_hstateenh,
4610 write_hstateen0h,
4611 .min_priv_ver = PRIV_VERSION_1_12_0 },
4612 [CSR_HSTATEEN1] = { "hstateen1", hstateen, read_hstateen,
4613 write_hstateen_1_3,
4614 .min_priv_ver = PRIV_VERSION_1_12_0 },
4615 [CSR_HSTATEEN1H] = { "hstateen1h", hstateenh, read_hstateenh,
4616 write_hstateenh_1_3,
4617 .min_priv_ver = PRIV_VERSION_1_12_0 },
4618 [CSR_HSTATEEN2] = { "hstateen2", hstateen, read_hstateen,
4619 write_hstateen_1_3,
4620 .min_priv_ver = PRIV_VERSION_1_12_0 },
4621 [CSR_HSTATEEN2H] = { "hstateen2h", hstateenh, read_hstateenh,
4622 write_hstateenh_1_3,
4623 .min_priv_ver = PRIV_VERSION_1_12_0 },
4624 [CSR_HSTATEEN3] = { "hstateen3", hstateen, read_hstateen,
4625 write_hstateen_1_3,
4626 .min_priv_ver = PRIV_VERSION_1_12_0 },
4627 [CSR_HSTATEEN3H] = { "hstateen3h", hstateenh, read_hstateenh,
4628 write_hstateenh_1_3,
4629 .min_priv_ver = PRIV_VERSION_1_12_0 },
4630 [CSR_SSTATEEN0] = { "sstateen0", sstateen, read_sstateen, write_sstateen0,
4631 .min_priv_ver = PRIV_VERSION_1_12_0 },
4632 [CSR_SSTATEEN1] = { "sstateen1", sstateen, read_sstateen,
4633 write_sstateen_1_3,
4634 .min_priv_ver = PRIV_VERSION_1_12_0 },
4635 [CSR_SSTATEEN2] = { "sstateen2", sstateen, read_sstateen,
4636 write_sstateen_1_3,
4637 .min_priv_ver = PRIV_VERSION_1_12_0 },
4638 [CSR_SSTATEEN3] = { "sstateen3", sstateen, read_sstateen,
4639 write_sstateen_1_3,
4640 .min_priv_ver = PRIV_VERSION_1_12_0 },
4641
4642 /* Supervisor Trap Setup */
4643 [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus,
4644 NULL, read_sstatus_i128 },
4645 [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie },
4646 [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec },
4647 [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren,
4648 write_scounteren },
4649
4650 /* Supervisor Trap Handling */
4651 [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch,
4652 NULL, read_sscratch_i128, write_sscratch_i128 },
4653 [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc },
4654 [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause },
4655 [CSR_STVAL] = { "stval", smode, read_stval, write_stval },
4656 [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip },
4657 [CSR_STIMECMP] = { "stimecmp", sstc, read_stimecmp, write_stimecmp,
4658 .min_priv_ver = PRIV_VERSION_1_12_0 },
4659 [CSR_STIMECMPH] = { "stimecmph", sstc_32, read_stimecmph, write_stimecmph,
4660 .min_priv_ver = PRIV_VERSION_1_12_0 },
4661 [CSR_VSTIMECMP] = { "vstimecmp", sstc, read_vstimecmp,
4662 write_vstimecmp,
4663 .min_priv_ver = PRIV_VERSION_1_12_0 },
4664 [CSR_VSTIMECMPH] = { "vstimecmph", sstc_32, read_vstimecmph,
4665 write_vstimecmph,
4666 .min_priv_ver = PRIV_VERSION_1_12_0 },
4667
4668 /* Supervisor Protection and Translation */
4669 [CSR_SATP] = { "satp", satp, read_satp, write_satp },
4670
4671 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
4672 [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect },
4673 [CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg },
4674
4675 /* Supervisor-Level Interrupts (AIA) */
4676 [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
4677 [CSR_STOPI] = { "stopi", aia_smode, read_stopi },
4678
4679 /* Supervisor-Level High-Half CSRs (AIA) */
4680 [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh },
4681 [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph },
4682
4683 [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus,
4684 .min_priv_ver = PRIV_VERSION_1_12_0 },
4685 [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg,
4686 .min_priv_ver = PRIV_VERSION_1_12_0 },
4687 [CSR_HIDELEG] = { "hideleg", hmode, NULL, NULL, rmw_hideleg,
4688 .min_priv_ver = PRIV_VERSION_1_12_0 },
4689 [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip,
4690 .min_priv_ver = PRIV_VERSION_1_12_0 },
4691 [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip,
4692 .min_priv_ver = PRIV_VERSION_1_12_0 },
4693 [CSR_HIE] = { "hie", hmode, NULL, NULL, rmw_hie,
4694 .min_priv_ver = PRIV_VERSION_1_12_0 },
4695 [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren,
4696 write_hcounteren,
4697 .min_priv_ver = PRIV_VERSION_1_12_0 },
4698 [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie,
4699 .min_priv_ver = PRIV_VERSION_1_12_0 },
4700 [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval,
4701 .min_priv_ver = PRIV_VERSION_1_12_0 },
4702 [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst,
4703 .min_priv_ver = PRIV_VERSION_1_12_0 },
4704 [CSR_HGEIP] = { "hgeip", hmode, read_hgeip,
4705 .min_priv_ver = PRIV_VERSION_1_12_0 },
4706 [CSR_HGATP] = { "hgatp", hgatp, read_hgatp, write_hgatp,
4707 .min_priv_ver = PRIV_VERSION_1_12_0 },
4708 [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta,
4709 write_htimedelta,
4710 .min_priv_ver = PRIV_VERSION_1_12_0 },
4711 [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah,
4712 write_htimedeltah,
4713 .min_priv_ver = PRIV_VERSION_1_12_0 },
4714
4715 [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus,
4716 write_vsstatus,
4717 .min_priv_ver = PRIV_VERSION_1_12_0 },
4718 [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip,
4719 .min_priv_ver = PRIV_VERSION_1_12_0 },
4720 [CSR_VSIE] = { "vsie", hmode, NULL, NULL, rmw_vsie ,
4721 .min_priv_ver = PRIV_VERSION_1_12_0 },
4722 [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec,
4723 .min_priv_ver = PRIV_VERSION_1_12_0 },
4724 [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch,
4725 write_vsscratch,
4726 .min_priv_ver = PRIV_VERSION_1_12_0 },
4727 [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc,
4728 .min_priv_ver = PRIV_VERSION_1_12_0 },
4729 [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause,
4730 .min_priv_ver = PRIV_VERSION_1_12_0 },
4731 [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval,
4732 .min_priv_ver = PRIV_VERSION_1_12_0 },
4733 [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp,
4734 .min_priv_ver = PRIV_VERSION_1_12_0 },
4735
4736 [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2,
4737 .min_priv_ver = PRIV_VERSION_1_12_0 },
4738 [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst,
4739 .min_priv_ver = PRIV_VERSION_1_12_0 },
4740
4741 /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
4742 [CSR_HVIEN] = { "hvien", aia_hmode, NULL, NULL, rmw_hvien },
4743 [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl,
4744 write_hvictl },
4745 [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1,
4746 write_hviprio1 },
4747 [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2,
4748 write_hviprio2 },
4749 /*
4750 * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
4751 */
4752 [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL,
4753 rmw_xiselect },
4754 [CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg },
4755
4756 /* VS-Level Interrupts (H-extension with AIA) */
4757 [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
4758 [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi },
4759
4760 /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
4761 [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL,
4762 rmw_hidelegh },
4763 [CSR_HVIENH] = { "hvienh", aia_hmode32, NULL, NULL, rmw_hvienh },
4764 [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph },
4765 [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h,
4766 write_hviprio1h },
4767 [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h,
4768 write_hviprio2h },
4769 [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh },
4770 [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph },
4771
4772 /* Physical Memory Protection */
4773 [CSR_MSECCFG] = { "mseccfg", have_mseccfg, read_mseccfg, write_mseccfg,
4774 .min_priv_ver = PRIV_VERSION_1_11_0 },
4775 [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg },
4776 [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg },
4777 [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg },
4778 [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg },
4779 [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr },
4780 [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr },
4781 [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr },
4782 [CSR_PMPADDR3] = { "pmpaddr3", pmp, read_pmpaddr, write_pmpaddr },
4783 [CSR_PMPADDR4] = { "pmpaddr4", pmp, read_pmpaddr, write_pmpaddr },
4784 [CSR_PMPADDR5] = { "pmpaddr5", pmp, read_pmpaddr, write_pmpaddr },
4785 [CSR_PMPADDR6] = { "pmpaddr6", pmp, read_pmpaddr, write_pmpaddr },
4786 [CSR_PMPADDR7] = { "pmpaddr7", pmp, read_pmpaddr, write_pmpaddr },
4787 [CSR_PMPADDR8] = { "pmpaddr8", pmp, read_pmpaddr, write_pmpaddr },
4788 [CSR_PMPADDR9] = { "pmpaddr9", pmp, read_pmpaddr, write_pmpaddr },
4789 [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr },
4790 [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
4791 [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
4792 [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
4793 [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
4794 [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
4795
4796 /* Debug CSRs */
4797 [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect },
4798 [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata },
4799 [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata },
4800 [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata },
4801 [CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },
4802
4803 /* User Pointer Masking */
4804 [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte },
4805 [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask,
4806 write_upmmask },
4807 [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase,
4808 write_upmbase },
4809 /* Machine Pointer Masking */
4810 [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte },
4811 [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask,
4812 write_mpmmask },
4813 [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase,
4814 write_mpmbase },
4815 /* Supervisor Pointer Masking */
4816 [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte },
4817 [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask,
4818 write_spmmask },
4819 [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase,
4820 write_spmbase },
4821
4822 /* Performance Counters */
4823 [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
4824 [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter },
4825 [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_hpmcounter },
4826 [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_hpmcounter },
4827 [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_hpmcounter },
4828 [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_hpmcounter },
4829 [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_hpmcounter },
4830 [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_hpmcounter },
4831 [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_hpmcounter },
4832 [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_hpmcounter },
4833 [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_hpmcounter },
4834 [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_hpmcounter },
4835 [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_hpmcounter },
4836 [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_hpmcounter },
4837 [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_hpmcounter },
4838 [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_hpmcounter },
4839 [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_hpmcounter },
4840 [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_hpmcounter },
4841 [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_hpmcounter },
4842 [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_hpmcounter },
4843 [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_hpmcounter },
4844 [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_hpmcounter },
4845 [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_hpmcounter },
4846 [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_hpmcounter },
4847 [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_hpmcounter },
4848 [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_hpmcounter },
4849 [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_hpmcounter },
4850 [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_hpmcounter },
4851 [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_hpmcounter },
4852
/*
 * Machine-mode mhpmcounter3..31: read/write programmable event counters.
 * Gated by the "mctr" predicate; writes go through write_mhpmcounter so the
 * PMU backend can restart/resync counting.
 */
4853 [CSR_MHPMCOUNTER3] = { "mhpmcounter3", mctr, read_hpmcounter,
4854 write_mhpmcounter },
4855 [CSR_MHPMCOUNTER4] = { "mhpmcounter4", mctr, read_hpmcounter,
4856 write_mhpmcounter },
4857 [CSR_MHPMCOUNTER5] = { "mhpmcounter5", mctr, read_hpmcounter,
4858 write_mhpmcounter },
4859 [CSR_MHPMCOUNTER6] = { "mhpmcounter6", mctr, read_hpmcounter,
4860 write_mhpmcounter },
4861 [CSR_MHPMCOUNTER7] = { "mhpmcounter7", mctr, read_hpmcounter,
4862 write_mhpmcounter },
4863 [CSR_MHPMCOUNTER8] = { "mhpmcounter8", mctr, read_hpmcounter,
4864 write_mhpmcounter },
4865 [CSR_MHPMCOUNTER9] = { "mhpmcounter9", mctr, read_hpmcounter,
4866 write_mhpmcounter },
4867 [CSR_MHPMCOUNTER10] = { "mhpmcounter10", mctr, read_hpmcounter,
4868 write_mhpmcounter },
4869 [CSR_MHPMCOUNTER11] = { "mhpmcounter11", mctr, read_hpmcounter,
4870 write_mhpmcounter },
4871 [CSR_MHPMCOUNTER12] = { "mhpmcounter12", mctr, read_hpmcounter,
4872 write_mhpmcounter },
4873 [CSR_MHPMCOUNTER13] = { "mhpmcounter13", mctr, read_hpmcounter,
4874 write_mhpmcounter },
4875 [CSR_MHPMCOUNTER14] = { "mhpmcounter14", mctr, read_hpmcounter,
4876 write_mhpmcounter },
4877 [CSR_MHPMCOUNTER15] = { "mhpmcounter15", mctr, read_hpmcounter,
4878 write_mhpmcounter },
4879 [CSR_MHPMCOUNTER16] = { "mhpmcounter16", mctr, read_hpmcounter,
4880 write_mhpmcounter },
4881 [CSR_MHPMCOUNTER17] = { "mhpmcounter17", mctr, read_hpmcounter,
4882 write_mhpmcounter },
4883 [CSR_MHPMCOUNTER18] = { "mhpmcounter18", mctr, read_hpmcounter,
4884 write_mhpmcounter },
4885 [CSR_MHPMCOUNTER19] = { "mhpmcounter19", mctr, read_hpmcounter,
4886 write_mhpmcounter },
4887 [CSR_MHPMCOUNTER20] = { "mhpmcounter20", mctr, read_hpmcounter,
4888 write_mhpmcounter },
4889 [CSR_MHPMCOUNTER21] = { "mhpmcounter21", mctr, read_hpmcounter,
4890 write_mhpmcounter },
4891 [CSR_MHPMCOUNTER22] = { "mhpmcounter22", mctr, read_hpmcounter,
4892 write_mhpmcounter },
4893 [CSR_MHPMCOUNTER23] = { "mhpmcounter23", mctr, read_hpmcounter,
4894 write_mhpmcounter },
4895 [CSR_MHPMCOUNTER24] = { "mhpmcounter24", mctr, read_hpmcounter,
4896 write_mhpmcounter },
4897 [CSR_MHPMCOUNTER25] = { "mhpmcounter25", mctr, read_hpmcounter,
4898 write_mhpmcounter },
4899 [CSR_MHPMCOUNTER26] = { "mhpmcounter26", mctr, read_hpmcounter,
4900 write_mhpmcounter },
4901 [CSR_MHPMCOUNTER27] = { "mhpmcounter27", mctr, read_hpmcounter,
4902 write_mhpmcounter },
4903 [CSR_MHPMCOUNTER28] = { "mhpmcounter28", mctr, read_hpmcounter,
4904 write_mhpmcounter },
4905 [CSR_MHPMCOUNTER29] = { "mhpmcounter29", mctr, read_hpmcounter,
4906 write_mhpmcounter },
4907 [CSR_MHPMCOUNTER30] = { "mhpmcounter30", mctr, read_hpmcounter,
4908 write_mhpmcounter },
4909 [CSR_MHPMCOUNTER31] = { "mhpmcounter31", mctr, read_hpmcounter,
4910 write_mhpmcounter },
4911
/* mcountinhibit was introduced in priv spec v1.11; older CPUs trap on it. */
4912 [CSR_MCOUNTINHIBIT] = { "mcountinhibit", any, read_mcountinhibit,
4913 write_mcountinhibit,
4914 .min_priv_ver = PRIV_VERSION_1_11_0 },
4915
/*
 * mhpmevent3..31: event selectors for the programmable counters. Predicate
 * is "any" (M-mode accessible whenever the CSR exists); write_mhpmevent
 * updates the PMU event mapping.
 */
4916 [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent,
4917 write_mhpmevent },
4918 [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent,
4919 write_mhpmevent },
4920 [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_mhpmevent,
4921 write_mhpmevent },
4922 [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_mhpmevent,
4923 write_mhpmevent },
4924 [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_mhpmevent,
4925 write_mhpmevent },
4926 [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_mhpmevent,
4927 write_mhpmevent },
4928 [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_mhpmevent,
4929 write_mhpmevent },
4930 [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_mhpmevent,
4931 write_mhpmevent },
4932 [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_mhpmevent,
4933 write_mhpmevent },
4934 [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_mhpmevent,
4935 write_mhpmevent },
4936 [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_mhpmevent,
4937 write_mhpmevent },
4938 [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_mhpmevent,
4939 write_mhpmevent },
4940 [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_mhpmevent,
4941 write_mhpmevent },
4942 [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_mhpmevent,
4943 write_mhpmevent },
4944 [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_mhpmevent,
4945 write_mhpmevent },
4946 [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_mhpmevent,
4947 write_mhpmevent },
4948 [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_mhpmevent,
4949 write_mhpmevent },
4950 [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_mhpmevent,
4951 write_mhpmevent },
4952 [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_mhpmevent,
4953 write_mhpmevent },
4954 [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_mhpmevent,
4955 write_mhpmevent },
4956 [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_mhpmevent,
4957 write_mhpmevent },
4958 [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_mhpmevent,
4959 write_mhpmevent },
4960 [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_mhpmevent,
4961 write_mhpmevent },
4962 [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_mhpmevent,
4963 write_mhpmevent },
4964 [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_mhpmevent,
4965 write_mhpmevent },
4966 [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_mhpmevent,
4967 write_mhpmevent },
4968 [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_mhpmevent,
4969 write_mhpmevent },
4970 [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_mhpmevent,
4971 write_mhpmevent },
4972 [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent,
4973 write_mhpmevent },
4974
/*
 * mhpmevent3h..31h: RV32 upper halves of the event selectors, defined by
 * the Sscofpmf extension (hence the sscofpmf predicate and the priv v1.12
 * minimum, which is when Sscofpmf support was ratified/added).
 */
4975 [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf, read_mhpmeventh,
4976 write_mhpmeventh,
4977 .min_priv_ver = PRIV_VERSION_1_12_0 },
4978 [CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf, read_mhpmeventh,
4979 write_mhpmeventh,
4980 .min_priv_ver = PRIV_VERSION_1_12_0 },
4981 [CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf, read_mhpmeventh,
4982 write_mhpmeventh,
4983 .min_priv_ver = PRIV_VERSION_1_12_0 },
4984 [CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf, read_mhpmeventh,
4985 write_mhpmeventh,
4986 .min_priv_ver = PRIV_VERSION_1_12_0 },
4987 [CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf, read_mhpmeventh,
4988 write_mhpmeventh,
4989 .min_priv_ver = PRIV_VERSION_1_12_0 },
4990 [CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf, read_mhpmeventh,
4991 write_mhpmeventh,
4992 .min_priv_ver = PRIV_VERSION_1_12_0 },
4993 [CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf, read_mhpmeventh,
4994 write_mhpmeventh,
4995 .min_priv_ver = PRIV_VERSION_1_12_0 },
4996 [CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf, read_mhpmeventh,
4997 write_mhpmeventh,
4998 .min_priv_ver = PRIV_VERSION_1_12_0 },
4999 [CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf, read_mhpmeventh,
5000 write_mhpmeventh,
5001 .min_priv_ver = PRIV_VERSION_1_12_0 },
5002 [CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf, read_mhpmeventh,
5003 write_mhpmeventh,
5004 .min_priv_ver = PRIV_VERSION_1_12_0 },
5005 [CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf, read_mhpmeventh,
5006 write_mhpmeventh,
5007 .min_priv_ver = PRIV_VERSION_1_12_0 },
5008 [CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf, read_mhpmeventh,
5009 write_mhpmeventh,
5010 .min_priv_ver = PRIV_VERSION_1_12_0 },
5011 [CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf, read_mhpmeventh,
5012 write_mhpmeventh,
5013 .min_priv_ver = PRIV_VERSION_1_12_0 },
5014 [CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf, read_mhpmeventh,
5015 write_mhpmeventh,
5016 .min_priv_ver = PRIV_VERSION_1_12_0 },
5017 [CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf, read_mhpmeventh,
5018 write_mhpmeventh,
5019 .min_priv_ver = PRIV_VERSION_1_12_0 },
5020 [CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf, read_mhpmeventh,
5021 write_mhpmeventh,
5022 .min_priv_ver = PRIV_VERSION_1_12_0 },
5023 [CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf, read_mhpmeventh,
5024 write_mhpmeventh,
5025 .min_priv_ver = PRIV_VERSION_1_12_0 },
5026 [CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf, read_mhpmeventh,
5027 write_mhpmeventh,
5028 .min_priv_ver = PRIV_VERSION_1_12_0 },
5029 [CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf, read_mhpmeventh,
5030 write_mhpmeventh,
5031 .min_priv_ver = PRIV_VERSION_1_12_0 },
5032 [CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf, read_mhpmeventh,
5033 write_mhpmeventh,
5034 .min_priv_ver = PRIV_VERSION_1_12_0 },
5035 [CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf, read_mhpmeventh,
5036 write_mhpmeventh,
5037 .min_priv_ver = PRIV_VERSION_1_12_0 },
5038 [CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf, read_mhpmeventh,
5039 write_mhpmeventh,
5040 .min_priv_ver = PRIV_VERSION_1_12_0 },
5041 [CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf, read_mhpmeventh,
5042 write_mhpmeventh,
5043 .min_priv_ver = PRIV_VERSION_1_12_0 },
5044 [CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf, read_mhpmeventh,
5045 write_mhpmeventh,
5046 .min_priv_ver = PRIV_VERSION_1_12_0 },
5047 [CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf, read_mhpmeventh,
5048 write_mhpmeventh,
5049 .min_priv_ver = PRIV_VERSION_1_12_0 },
5050 [CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf, read_mhpmeventh,
5051 write_mhpmeventh,
5052 .min_priv_ver = PRIV_VERSION_1_12_0 },
5053 [CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf, read_mhpmeventh,
5054 write_mhpmeventh,
5055 .min_priv_ver = PRIV_VERSION_1_12_0 },
5056 [CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf, read_mhpmeventh,
5057 write_mhpmeventh,
5058 .min_priv_ver = PRIV_VERSION_1_12_0 },
5059 [CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf, read_mhpmeventh,
5060 write_mhpmeventh,
5061 .min_priv_ver = PRIV_VERSION_1_12_0 },
5062
/*
 * hpmcounter3h..31h: RV32-only read-only upper halves of the unprivileged
 * counter shadows; ctr32 restricts them to 32-bit targets.
 */
5063 [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_hpmcounterh },
5064 [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_hpmcounterh },
5065 [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_hpmcounterh },
5066 [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_hpmcounterh },
5067 [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_hpmcounterh },
5068 [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_hpmcounterh },
5069 [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_hpmcounterh },
5070 [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_hpmcounterh },
5071 [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_hpmcounterh },
5072 [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_hpmcounterh },
5073 [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_hpmcounterh },
5074 [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_hpmcounterh },
5075 [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_hpmcounterh },
5076 [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_hpmcounterh },
5077 [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_hpmcounterh },
5078 [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_hpmcounterh },
5079 [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_hpmcounterh },
5080 [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_hpmcounterh },
5081 [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_hpmcounterh },
5082 [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_hpmcounterh },
5083 [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_hpmcounterh },
5084 [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_hpmcounterh },
5085 [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_hpmcounterh },
5086 [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_hpmcounterh },
5087 [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_hpmcounterh },
5088 [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_hpmcounterh },
5089 [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_hpmcounterh },
5090 [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_hpmcounterh },
5091 [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_hpmcounterh },
5092
/*
 * mhpmcounter3h..31h: RV32-only read/write upper halves of the machine
 * counters (mctr32 predicate), followed by scountovf, the Sscofpmf
 * read-only count-overflow status CSR (priv v1.12 minimum).
 */
5093 [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", mctr32, read_hpmcounterh,
5094 write_mhpmcounterh },
5095 [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", mctr32, read_hpmcounterh,
5096 write_mhpmcounterh },
5097 [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", mctr32, read_hpmcounterh,
5098 write_mhpmcounterh },
5099 [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", mctr32, read_hpmcounterh,
5100 write_mhpmcounterh },
5101 [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", mctr32, read_hpmcounterh,
5102 write_mhpmcounterh },
5103 [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", mctr32, read_hpmcounterh,
5104 write_mhpmcounterh },
5105 [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", mctr32, read_hpmcounterh,
5106 write_mhpmcounterh },
5107 [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", mctr32, read_hpmcounterh,
5108 write_mhpmcounterh },
5109 [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", mctr32, read_hpmcounterh,
5110 write_mhpmcounterh },
5111 [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", mctr32, read_hpmcounterh,
5112 write_mhpmcounterh },
5113 [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", mctr32, read_hpmcounterh,
5114 write_mhpmcounterh },
5115 [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", mctr32, read_hpmcounterh,
5116 write_mhpmcounterh },
5117 [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", mctr32, read_hpmcounterh,
5118 write_mhpmcounterh },
5119 [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", mctr32, read_hpmcounterh,
5120 write_mhpmcounterh },
5121 [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", mctr32, read_hpmcounterh,
5122 write_mhpmcounterh },
5123 [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", mctr32, read_hpmcounterh,
5124 write_mhpmcounterh },
5125 [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", mctr32, read_hpmcounterh,
5126 write_mhpmcounterh },
5127 [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", mctr32, read_hpmcounterh,
5128 write_mhpmcounterh },
5129 [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", mctr32, read_hpmcounterh,
5130 write_mhpmcounterh },
5131 [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", mctr32, read_hpmcounterh,
5132 write_mhpmcounterh },
5133 [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", mctr32, read_hpmcounterh,
5134 write_mhpmcounterh },
5135 [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", mctr32, read_hpmcounterh,
5136 write_mhpmcounterh },
5137 [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", mctr32, read_hpmcounterh,
5138 write_mhpmcounterh },
5139 [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", mctr32, read_hpmcounterh,
5140 write_mhpmcounterh },
5141 [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", mctr32, read_hpmcounterh,
5142 write_mhpmcounterh },
5143 [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", mctr32, read_hpmcounterh,
5144 write_mhpmcounterh },
5145 [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", mctr32, read_hpmcounterh,
5146 write_mhpmcounterh },
5147 [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", mctr32, read_hpmcounterh,
5148 write_mhpmcounterh },
5149 [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", mctr32, read_hpmcounterh,
5150 write_mhpmcounterh },
5151 [CSR_SCOUNTOVF] = { "scountovf", sscofpmf, read_scountovf,
5152 .min_priv_ver = PRIV_VERSION_1_12_0 },
5153
5154 #endif /* !CONFIG_USER_ONLY */
5155 };
5156