/*
 * RISC-V Control and Status Registers.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/timer.h"
23 #include "cpu.h"
24 #include "tcg/tcg-cpu.h"
25 #include "pmu.h"
26 #include "time_helper.h"
27 #include "exec/cputlb.h"
28 #include "exec/tb-flush.h"
29 #include "exec/icount.h"
30 #include "accel/tcg/getpc.h"
31 #include "qemu/guest-random.h"
32 #include "qapi/error.h"
33 #include "tcg/insn-start-words.h"
34 #include "internals.h"
35 #include <stdbool.h>
36
37 /* CSR function table public API */
/* Copy the operations registered for @csrno into @ops. */
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
{
    int idx = csrno & (CSR_TABLE_SIZE - 1);

    *ops = csr_ops[idx];
}
42
riscv_set_csr_ops(int csrno,const riscv_csr_operations * ops)43 void riscv_set_csr_ops(int csrno, const riscv_csr_operations *ops)
44 {
45 csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
46 }
47
48 /* Predicates */
49 #if !defined(CONFIG_USER_ONLY)
/*
 * Smstateen access check for the state selected by @bit in the
 * stateen register group @index.
 *
 * M-mode, or a CPU without Smstateen, is never restricted.  A clear
 * bit in mstateen raises an illegal-instruction exception; when
 * running virtualized, a clear bit in hstateen (or in sstateen for
 * VU-mode) raises a virtual-instruction exception instead.  The check
 * order (mstateen, then hstateen, then sstateen) matters for which
 * exception type is reported.
 */
RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit)
{
    bool virt = env->virt_enabled;

    if (env->priv == PRV_M || !riscv_cpu_cfg(env)->ext_smstateen) {
        return RISCV_EXCP_NONE;
    }

    if (!(env->mstateen[index] & bit)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (virt) {
        if (!(env->hstateen[index] & bit)) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }

        if (env->priv == PRV_U && !(env->sstateen[index] & bit)) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    /* Non-virtualized U-mode under an S-mode OS is gated by sstateen. */
    if (env->priv == PRV_U && riscv_has_ext(env, RVS)) {
        if (!(env->sstateen[index] & bit)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }

    return RISCV_EXCP_NONE;
}
80 #endif
81
/*
 * Predicate for the floating-point CSRs (fflags/frm/fcsr).
 * Accessible when FP state is enabled (mstatus.FS) or with Zfinx;
 * fcsr access is additionally subject to the Smstateen FCSR bit.
 * The debugger bypasses both checks.
 */
static RISCVException fs(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
    if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
        !riscv_cpu_cfg(env)->ext_zfinx) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
        /* Zfinx path: FP CSRs exist without mstatus.FS, but Smstateen applies. */
        return smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR);
    }
#endif
    return RISCV_EXCP_NONE;
}
96
vs(CPURISCVState * env,int csrno)97 static RISCVException vs(CPURISCVState *env, int csrno)
98 {
99 if (riscv_cpu_cfg(env)->ext_zve32x) {
100 #if !defined(CONFIG_USER_ONLY)
101 if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
102 return RISCV_EXCP_ILLEGAL_INST;
103 }
104 #endif
105 return RISCV_EXCP_NONE;
106 }
107 return RISCV_EXCP_ILLEGAL_INST;
108 }
109
/*
 * Predicate for the unprivileged counter CSRs: cycle/time/instret,
 * hpmcounter3..31 and their RV32 high halves.  Checks that the
 * counter exists (Zicntr for the fixed three, PMU availability for
 * the rest) and then walks the mcounteren/hcounteren/scounteren
 * delegation chain for the current privilege level.
 */
static RISCVException ctr(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
    RISCVCPU *cpu = env_archcpu(env);
    int ctr_index;
    target_ulong ctr_mask;
    int base_csrno = CSR_CYCLE;
    bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;

    if (rv32 && csrno >= CSR_CYCLEH) {
        /* Offset for RV32 hpmcounternh counters */
        base_csrno += 0x80;
    }
    ctr_index = csrno - base_csrno;
    ctr_mask = BIT(ctr_index);

    if ((csrno >= CSR_CYCLE && csrno <= CSR_INSTRET) ||
        (csrno >= CSR_CYCLEH && csrno <= CSR_INSTRETH)) {
        /* cycle/time/instret come from Zicntr, not from the PMU. */
        if (!riscv_cpu_cfg(env)->ext_zicntr) {
            return RISCV_EXCP_ILLEGAL_INST;
        }

        goto skip_ext_pmu_check;
    }

    if (!(cpu->pmu_avail_ctrs & ctr_mask)) {
        /* No counter is enabled in PMU or the counter is out of range */
        return RISCV_EXCP_ILLEGAL_INST;
    }

skip_ext_pmu_check:

    /* Debugger accesses bypass the counter-enable chain. */
    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    /* mcounteren gates every access below M-mode. */
    if (env->priv < PRV_M && !get_field(env->mcounteren, ctr_mask)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (env->virt_enabled) {
        /* Guests also need hcounteren, and VU-mode needs scounteren. */
        if (!get_field(env->hcounteren, ctr_mask) ||
            (env->priv == PRV_U && !get_field(env->scounteren, ctr_mask))) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    /* Non-virtualized U-mode under an S-mode OS needs scounteren. */
    if (riscv_has_ext(env, RVS) && env->priv == PRV_U &&
        !get_field(env->scounteren, ctr_mask)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

#endif
    return RISCV_EXCP_NONE;
}
165
ctr32(CPURISCVState * env,int csrno)166 static RISCVException ctr32(CPURISCVState *env, int csrno)
167 {
168 if (riscv_cpu_mxl(env) != MXL_RV32) {
169 return RISCV_EXCP_ILLEGAL_INST;
170 }
171
172 return ctr(env, csrno);
173 }
174
zcmt(CPURISCVState * env,int csrno)175 static RISCVException zcmt(CPURISCVState *env, int csrno)
176 {
177 if (!riscv_cpu_cfg(env)->ext_zcmt) {
178 return RISCV_EXCP_ILLEGAL_INST;
179 }
180
181 #if !defined(CONFIG_USER_ONLY)
182 RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_JVT);
183 if (ret != RISCV_EXCP_NONE) {
184 return ret;
185 }
186 #endif
187
188 return RISCV_EXCP_NONE;
189 }
190
cfi_ss(CPURISCVState * env,int csrno)191 static RISCVException cfi_ss(CPURISCVState *env, int csrno)
192 {
193 if (!env_archcpu(env)->cfg.ext_zicfiss) {
194 return RISCV_EXCP_ILLEGAL_INST;
195 }
196
197 /* If ext implemented, M-mode always have access to SSP CSR */
198 if (env->priv == PRV_M) {
199 return RISCV_EXCP_NONE;
200 }
201
202 /* if bcfi not active for current env, access to csr is illegal */
203 if (!cpu_get_bcfien(env)) {
204 #if !defined(CONFIG_USER_ONLY)
205 if (env->debugger) {
206 return RISCV_EXCP_NONE;
207 }
208 #endif
209 return RISCV_EXCP_ILLEGAL_INST;
210 }
211
212 return RISCV_EXCP_NONE;
213 }
214
215 #if !defined(CONFIG_USER_ONLY)
mctr(CPURISCVState * env,int csrno)216 static RISCVException mctr(CPURISCVState *env, int csrno)
217 {
218 RISCVCPU *cpu = env_archcpu(env);
219 uint32_t pmu_avail_ctrs = cpu->pmu_avail_ctrs;
220 int ctr_index;
221 int base_csrno = CSR_MHPMCOUNTER3;
222
223 if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) {
224 /* Offset for RV32 mhpmcounternh counters */
225 csrno -= 0x80;
226 }
227
228 g_assert(csrno >= CSR_MHPMCOUNTER3 && csrno <= CSR_MHPMCOUNTER31);
229
230 ctr_index = csrno - base_csrno;
231 if ((BIT(ctr_index) & pmu_avail_ctrs >> 3) == 0) {
232 /* The PMU is not enabled or counter is out of range */
233 return RISCV_EXCP_ILLEGAL_INST;
234 }
235
236 return RISCV_EXCP_NONE;
237 }
238
mctr32(CPURISCVState * env,int csrno)239 static RISCVException mctr32(CPURISCVState *env, int csrno)
240 {
241 if (riscv_cpu_mxl(env) != MXL_RV32) {
242 return RISCV_EXCP_ILLEGAL_INST;
243 }
244
245 return mctr(env, csrno);
246 }
247
sscofpmf(CPURISCVState * env,int csrno)248 static RISCVException sscofpmf(CPURISCVState *env, int csrno)
249 {
250 if (!riscv_cpu_cfg(env)->ext_sscofpmf) {
251 return RISCV_EXCP_ILLEGAL_INST;
252 }
253
254 return RISCV_EXCP_NONE;
255 }
256
sscofpmf_32(CPURISCVState * env,int csrno)257 static RISCVException sscofpmf_32(CPURISCVState *env, int csrno)
258 {
259 if (riscv_cpu_mxl(env) != MXL_RV32) {
260 return RISCV_EXCP_ILLEGAL_INST;
261 }
262
263 return sscofpmf(env, csrno);
264 }
265
smcntrpmf(CPURISCVState * env,int csrno)266 static RISCVException smcntrpmf(CPURISCVState *env, int csrno)
267 {
268 if (!riscv_cpu_cfg(env)->ext_smcntrpmf) {
269 return RISCV_EXCP_ILLEGAL_INST;
270 }
271
272 return RISCV_EXCP_NONE;
273 }
274
smcntrpmf_32(CPURISCVState * env,int csrno)275 static RISCVException smcntrpmf_32(CPURISCVState *env, int csrno)
276 {
277 if (riscv_cpu_mxl(env) != MXL_RV32) {
278 return RISCV_EXCP_ILLEGAL_INST;
279 }
280
281 return smcntrpmf(env, csrno);
282 }
283
/* Predicate for CSRs that are always accessible (M-mode only table). */
static RISCVException any(CPURISCVState *env, int csrno)
{
    return RISCV_EXCP_NONE;
}
288
any32(CPURISCVState * env,int csrno)289 static RISCVException any32(CPURISCVState *env, int csrno)
290 {
291 if (riscv_cpu_mxl(env) != MXL_RV32) {
292 return RISCV_EXCP_ILLEGAL_INST;
293 }
294
295 return any(env, csrno);
296
297 }
298
aia_any(CPURISCVState * env,int csrno)299 static RISCVException aia_any(CPURISCVState *env, int csrno)
300 {
301 if (!riscv_cpu_cfg(env)->ext_smaia) {
302 return RISCV_EXCP_ILLEGAL_INST;
303 }
304
305 return any(env, csrno);
306 }
307
aia_any32(CPURISCVState * env,int csrno)308 static RISCVException aia_any32(CPURISCVState *env, int csrno)
309 {
310 if (!riscv_cpu_cfg(env)->ext_smaia) {
311 return RISCV_EXCP_ILLEGAL_INST;
312 }
313
314 return any32(env, csrno);
315 }
316
csrind_any(CPURISCVState * env,int csrno)317 static RISCVException csrind_any(CPURISCVState *env, int csrno)
318 {
319 if (!riscv_cpu_cfg(env)->ext_smcsrind) {
320 return RISCV_EXCP_ILLEGAL_INST;
321 }
322
323 return RISCV_EXCP_NONE;
324 }
325
csrind_or_aia_any(CPURISCVState * env,int csrno)326 static RISCVException csrind_or_aia_any(CPURISCVState *env, int csrno)
327 {
328 if (!riscv_cpu_cfg(env)->ext_smaia && !riscv_cpu_cfg(env)->ext_smcsrind) {
329 return RISCV_EXCP_ILLEGAL_INST;
330 }
331
332 return any(env, csrno);
333 }
334
smode(CPURISCVState * env,int csrno)335 static RISCVException smode(CPURISCVState *env, int csrno)
336 {
337 if (riscv_has_ext(env, RVS)) {
338 return RISCV_EXCP_NONE;
339 }
340
341 return RISCV_EXCP_ILLEGAL_INST;
342 }
343
smode32(CPURISCVState * env,int csrno)344 static RISCVException smode32(CPURISCVState *env, int csrno)
345 {
346 if (riscv_cpu_mxl(env) != MXL_RV32) {
347 return RISCV_EXCP_ILLEGAL_INST;
348 }
349
350 return smode(env, csrno);
351 }
352
aia_smode(CPURISCVState * env,int csrno)353 static RISCVException aia_smode(CPURISCVState *env, int csrno)
354 {
355 int ret;
356
357 if (!riscv_cpu_cfg(env)->ext_ssaia) {
358 return RISCV_EXCP_ILLEGAL_INST;
359 }
360
361 if (csrno == CSR_STOPEI) {
362 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC);
363 } else {
364 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
365 }
366
367 if (ret != RISCV_EXCP_NONE) {
368 return ret;
369 }
370
371 return smode(env, csrno);
372 }
373
/*
 * RV32-only AIA supervisor CSRs (the *ih high halves).  The
 * privilege field of the CSR number (bits [9:8], mask 0x300)
 * distinguishes M-level aliases, which additionally need Smaia.
 */
static RISCVException aia_smode32(CPURISCVState *env, int csrno)
{
    int ret;
    int csr_priv = get_field(csrno, 0x300);

    if (csr_priv == PRV_M && !riscv_cpu_cfg(env)->ext_smaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    } else if (!riscv_cpu_cfg(env)->ext_ssaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    return smode32(env, csrno);
}
392
scountinhibit_pred(CPURISCVState * env,int csrno)393 static RISCVException scountinhibit_pred(CPURISCVState *env, int csrno)
394 {
395 RISCVCPU *cpu = env_archcpu(env);
396
397 if (!cpu->cfg.ext_ssccfg || !cpu->cfg.ext_smcdeleg) {
398 return RISCV_EXCP_ILLEGAL_INST;
399 }
400
401 if (env->virt_enabled) {
402 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
403 }
404
405 return smode(env, csrno);
406 }
407
csrind_extensions_present(CPURISCVState * env)408 static bool csrind_extensions_present(CPURISCVState *env)
409 {
410 return riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind;
411 }
412
aia_extensions_present(CPURISCVState * env)413 static bool aia_extensions_present(CPURISCVState *env)
414 {
415 return riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_ssaia;
416 }
417
csrind_or_aia_extensions_present(CPURISCVState * env)418 static bool csrind_or_aia_extensions_present(CPURISCVState *env)
419 {
420 return csrind_extensions_present(env) || aia_extensions_present(env);
421 }
422
csrind_smode(CPURISCVState * env,int csrno)423 static RISCVException csrind_smode(CPURISCVState *env, int csrno)
424 {
425 if (!csrind_extensions_present(env)) {
426 return RISCV_EXCP_ILLEGAL_INST;
427 }
428
429 return smode(env, csrno);
430 }
431
csrind_or_aia_smode(CPURISCVState * env,int csrno)432 static RISCVException csrind_or_aia_smode(CPURISCVState *env, int csrno)
433 {
434 if (!csrind_or_aia_extensions_present(env)) {
435 return RISCV_EXCP_ILLEGAL_INST;
436 }
437
438 return smode(env, csrno);
439 }
440
hmode(CPURISCVState * env,int csrno)441 static RISCVException hmode(CPURISCVState *env, int csrno)
442 {
443 if (riscv_has_ext(env, RVH)) {
444 return RISCV_EXCP_NONE;
445 }
446
447 return RISCV_EXCP_ILLEGAL_INST;
448 }
449
hmode32(CPURISCVState * env,int csrno)450 static RISCVException hmode32(CPURISCVState *env, int csrno)
451 {
452 if (riscv_cpu_mxl(env) != MXL_RV32) {
453 return RISCV_EXCP_ILLEGAL_INST;
454 }
455
456 return hmode(env, csrno);
457
458 }
459
csrind_hmode(CPURISCVState * env,int csrno)460 static RISCVException csrind_hmode(CPURISCVState *env, int csrno)
461 {
462 if (!csrind_extensions_present(env)) {
463 return RISCV_EXCP_ILLEGAL_INST;
464 }
465
466 return hmode(env, csrno);
467 }
468
csrind_or_aia_hmode(CPURISCVState * env,int csrno)469 static RISCVException csrind_or_aia_hmode(CPURISCVState *env, int csrno)
470 {
471 if (!csrind_or_aia_extensions_present(env)) {
472 return RISCV_EXCP_ILLEGAL_INST;
473 }
474
475 return hmode(env, csrno);
476 }
477
umode(CPURISCVState * env,int csrno)478 static RISCVException umode(CPURISCVState *env, int csrno)
479 {
480 if (riscv_has_ext(env, RVU)) {
481 return RISCV_EXCP_NONE;
482 }
483
484 return RISCV_EXCP_ILLEGAL_INST;
485 }
486
umode32(CPURISCVState * env,int csrno)487 static RISCVException umode32(CPURISCVState *env, int csrno)
488 {
489 if (riscv_cpu_mxl(env) != MXL_RV32) {
490 return RISCV_EXCP_ILLEGAL_INST;
491 }
492
493 return umode(env, csrno);
494 }
495
mstateen(CPURISCVState * env,int csrno)496 static RISCVException mstateen(CPURISCVState *env, int csrno)
497 {
498 if (!riscv_cpu_cfg(env)->ext_smstateen) {
499 return RISCV_EXCP_ILLEGAL_INST;
500 }
501
502 return any(env, csrno);
503 }
504
/*
 * Common predicate for hstateen0-3 and their RV32 high halves.
 * @base is the first CSR number of the group, so csrno - base indexes
 * the mstateen[] array.  Below M-mode, the SE bit of the matching
 * mstateen register must be set; the debugger bypasses that check.
 */
static RISCVException hstateen_pred(CPURISCVState *env, int csrno, int base)
{
    if (!riscv_cpu_cfg(env)->ext_smstateen) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    RISCVException ret = hmode(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    if (env->priv < PRV_M) {
        if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEEN)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }

    return RISCV_EXCP_NONE;
}
528
/* Predicate for hstateen0-3. */
static RISCVException hstateen(CPURISCVState *env, int csrno)
{
    return hstateen_pred(env, csrno, CSR_HSTATEEN0);
}
533
/* Predicate for the RV32 high halves hstateen0h-3h. */
static RISCVException hstateenh(CPURISCVState *env, int csrno)
{
    return hstateen_pred(env, csrno, CSR_HSTATEEN0H);
}
538
/*
 * Predicate for sstateen0-3.  Requires Smstateen and S-mode; below
 * M-mode the SE bit of the matching mstateen register must be set,
 * and when virtualized the matching hstateen SE bit as well (the
 * latter raising a virtual-instruction exception instead).
 */
static RISCVException sstateen(CPURISCVState *env, int csrno)
{
    bool virt = env->virt_enabled;
    int index = csrno - CSR_SSTATEEN0;

    if (!riscv_cpu_cfg(env)->ext_smstateen) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    RISCVException ret = smode(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* The debugger bypasses the stateen delegation chain. */
    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    if (env->priv < PRV_M) {
        if (!(env->mstateen[index] & SMSTATEEN_STATEEN)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }

        if (virt) {
            if (!(env->hstateen[index] & SMSTATEEN_STATEEN)) {
                return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
            }
        }
    }

    return RISCV_EXCP_NONE;
}
571
/*
 * Predicate for the Sstc stimecmp/vstimecmp CSRs (and RV32 high
 * halves).  Requires the Sstc extension and a timer backend
 * (rdtime_fn).  Below M-mode, access additionally requires
 * mcounteren.TM and menvcfg.STCE, and in virtualized modes
 * hcounteren.TM and henvcfg.STCE as well.
 */
static RISCVException sstc(CPURISCVState *env, int csrno)
{
    bool hmode_check = false;

    if (!riscv_cpu_cfg(env)->ext_sstc || !env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* vstimecmp/vstimecmph are hypervisor CSRs, not supervisor ones. */
    if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) {
        hmode_check = true;
    }

    RISCVException ret = hmode_check ? hmode(env, csrno) : smode(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    }

    /*
     * No need of separate function for rv32 as menvcfg stores both menvcfg
     * menvcfgh for RV32.
     */
    if (!(get_field(env->mcounteren, COUNTEREN_TM) &&
          get_field(env->menvcfg, MENVCFG_STCE))) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (env->virt_enabled) {
        if (!(get_field(env->hcounteren, COUNTEREN_TM) &&
              get_field(env->henvcfg, HENVCFG_STCE))) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    return RISCV_EXCP_NONE;
}
615
sstc_32(CPURISCVState * env,int csrno)616 static RISCVException sstc_32(CPURISCVState *env, int csrno)
617 {
618 if (riscv_cpu_mxl(env) != MXL_RV32) {
619 return RISCV_EXCP_ILLEGAL_INST;
620 }
621
622 return sstc(env, csrno);
623 }
624
satp(CPURISCVState * env,int csrno)625 static RISCVException satp(CPURISCVState *env, int csrno)
626 {
627 if (env->priv == PRV_S && !env->virt_enabled &&
628 get_field(env->mstatus, MSTATUS_TVM)) {
629 return RISCV_EXCP_ILLEGAL_INST;
630 }
631 if (env->priv == PRV_S && env->virt_enabled &&
632 get_field(env->hstatus, HSTATUS_VTVM)) {
633 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
634 }
635
636 return smode(env, csrno);
637 }
638
hgatp(CPURISCVState * env,int csrno)639 static RISCVException hgatp(CPURISCVState *env, int csrno)
640 {
641 if (env->priv == PRV_S && !env->virt_enabled &&
642 get_field(env->mstatus, MSTATUS_TVM)) {
643 return RISCV_EXCP_ILLEGAL_INST;
644 }
645
646 return hmode(env, csrno);
647 }
648
649 /*
650 * M-mode:
651 * Without ext_smctr raise illegal inst excep.
652 * Otherwise everything is accessible to m-mode.
653 *
654 * S-mode:
655 * Without ext_ssctr or mstateen.ctr raise illegal inst excep.
656 * Otherwise everything other than mctrctl is accessible.
657 *
658 * VS-mode:
659 * Without ext_ssctr or mstateen.ctr raise illegal inst excep.
660 * Without hstateen.ctr raise virtual illegal inst excep.
661 * Otherwise allow sctrctl (vsctrctl), sctrstatus, 0x200-0x2ff entry range.
662 * Always raise illegal instruction exception for sctrdepth.
663 */
ctr_mmode(CPURISCVState * env,int csrno)664 static RISCVException ctr_mmode(CPURISCVState *env, int csrno)
665 {
666 /* Check if smctr-ext is present */
667 if (riscv_cpu_cfg(env)->ext_smctr) {
668 return RISCV_EXCP_NONE;
669 }
670
671 return RISCV_EXCP_ILLEGAL_INST;
672 }
673
/*
 * S-mode CTR CSRs: need Smctr or Ssctr plus the Smstateen CTR bit.
 * sctrdepth is never visible from VS-mode, so a guest access that
 * passed the stateen check still gets a virtual-instruction fault.
 */
static RISCVException ctr_smode(CPURISCVState *env, int csrno)
{
    const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);

    if (!cfg->ext_smctr && !cfg->ext_ssctr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_CTR);
    if (ret == RISCV_EXCP_NONE && csrno == CSR_SCTRDEPTH &&
        env->virt_enabled) {
        return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
    }

    return ret;
}
690
aia_hmode(CPURISCVState * env,int csrno)691 static RISCVException aia_hmode(CPURISCVState *env, int csrno)
692 {
693 int ret;
694
695 if (!riscv_cpu_cfg(env)->ext_ssaia) {
696 return RISCV_EXCP_ILLEGAL_INST;
697 }
698
699 if (csrno == CSR_VSTOPEI) {
700 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC);
701 } else {
702 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
703 }
704
705 if (ret != RISCV_EXCP_NONE) {
706 return ret;
707 }
708
709 return hmode(env, csrno);
710 }
711
aia_hmode32(CPURISCVState * env,int csrno)712 static RISCVException aia_hmode32(CPURISCVState *env, int csrno)
713 {
714 int ret;
715
716 if (!riscv_cpu_cfg(env)->ext_ssaia) {
717 return RISCV_EXCP_ILLEGAL_INST;
718 }
719
720 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
721 if (ret != RISCV_EXCP_NONE) {
722 return ret;
723 }
724
725 if (!riscv_cpu_cfg(env)->ext_ssaia) {
726 return RISCV_EXCP_ILLEGAL_INST;
727 }
728
729 return hmode32(env, csrno);
730 }
731
dbltrp_hmode(CPURISCVState * env,int csrno)732 static RISCVException dbltrp_hmode(CPURISCVState *env, int csrno)
733 {
734 if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
735 return RISCV_EXCP_NONE;
736 }
737
738 return hmode(env, csrno);
739 }
740
pmp(CPURISCVState * env,int csrno)741 static RISCVException pmp(CPURISCVState *env, int csrno)
742 {
743 if (riscv_cpu_cfg(env)->pmp) {
744 int max_pmpcfg = (env->priv_ver >= PRIV_VERSION_1_12_0) ?
745 + CSR_PMPCFG15 : CSR_PMPCFG3;
746
747 if (csrno <= max_pmpcfg) {
748 uint32_t reg_index = csrno - CSR_PMPCFG0;
749
750 /* TODO: RV128 restriction check */
751 if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
752 return RISCV_EXCP_ILLEGAL_INST;
753 }
754 }
755
756 return RISCV_EXCP_NONE;
757 }
758
759 return RISCV_EXCP_ILLEGAL_INST;
760 }
761
have_mseccfg(CPURISCVState * env,int csrno)762 static RISCVException have_mseccfg(CPURISCVState *env, int csrno)
763 {
764 if (riscv_cpu_cfg(env)->ext_smepmp) {
765 return RISCV_EXCP_NONE;
766 }
767 if (riscv_cpu_cfg(env)->ext_zkr) {
768 return RISCV_EXCP_NONE;
769 }
770 if (riscv_cpu_cfg(env)->ext_smmpm) {
771 return RISCV_EXCP_NONE;
772 }
773
774 return RISCV_EXCP_ILLEGAL_INST;
775 }
776
debug(CPURISCVState * env,int csrno)777 static RISCVException debug(CPURISCVState *env, int csrno)
778 {
779 if (riscv_cpu_cfg(env)->debug) {
780 return RISCV_EXCP_NONE;
781 }
782
783 return RISCV_EXCP_ILLEGAL_INST;
784 }
785
rnmi(CPURISCVState * env,int csrno)786 static RISCVException rnmi(CPURISCVState *env, int csrno)
787 {
788 RISCVCPU *cpu = env_archcpu(env);
789
790 if (cpu->cfg.ext_smrnmi) {
791 return RISCV_EXCP_NONE;
792 }
793
794 return RISCV_EXCP_ILLEGAL_INST;
795 }
796 #endif
797
/*
 * Predicate for the Zkr seed CSR.  Access from modes below M is
 * controlled by the mseccfg SSEED/USEED bits; see the in-body comment
 * for the exact rules.  The debugger always has access.
 */
static RISCVException seed(CPURISCVState *env, int csrno)
{
    if (!riscv_cpu_cfg(env)->ext_zkr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    /*
     * With a CSR read-write instruction:
     * 1) The seed CSR is always available in machine mode as normal.
     * 2) Attempted access to seed from virtual modes VS and VU always raises
     * an exception(virtual instruction exception only if mseccfg.sseed=1).
     * 3) Without the corresponding access control bit set to 1, any attempted
     * access to seed from U, S or HS modes will raise an illegal instruction
     * exception.
     */
    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    } else if (env->virt_enabled) {
        if (env->mseccfg & MSECCFG_SSEED) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        } else {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    } else {
        if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) {
            return RISCV_EXCP_NONE;
        } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) {
            return RISCV_EXCP_NONE;
        } else {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }
#else
    return RISCV_EXCP_NONE;
#endif
}
839
840 /* zicfiss CSR_SSP read and write */
/* Zicfiss: read the shadow-stack pointer CSR. */
static RISCVException read_ssp(CPURISCVState *env, int csrno,
                               target_ulong *val)
{
    *val = env->ssp;
    return RISCV_EXCP_NONE;
}
847
/* Zicfiss: write the shadow-stack pointer CSR. */
static RISCVException write_ssp(CPURISCVState *env, int csrno,
                                target_ulong val, uintptr_t ra)
{
    env->ssp = val;
    return RISCV_EXCP_NONE;
}
854
855 /* User Floating-Point CSRs */
/* Read the accrued FP exception flags. */
static RISCVException read_fflags(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = riscv_cpu_get_fflags(env);
    return RISCV_EXCP_NONE;
}
862
write_fflags(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)863 static RISCVException write_fflags(CPURISCVState *env, int csrno,
864 target_ulong val, uintptr_t ra)
865 {
866 #if !defined(CONFIG_USER_ONLY)
867 if (riscv_has_ext(env, RVF)) {
868 env->mstatus |= MSTATUS_FS;
869 }
870 #endif
871 riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
872 return RISCV_EXCP_NONE;
873 }
874
/* Read the dynamic FP rounding mode. */
static RISCVException read_frm(CPURISCVState *env, int csrno,
                               target_ulong *val)
{
    *val = env->frm;
    return RISCV_EXCP_NONE;
}
881
write_frm(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)882 static RISCVException write_frm(CPURISCVState *env, int csrno,
883 target_ulong val, uintptr_t ra)
884 {
885 #if !defined(CONFIG_USER_ONLY)
886 if (riscv_has_ext(env, RVF)) {
887 env->mstatus |= MSTATUS_FS;
888 }
889 #endif
890 env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
891 return RISCV_EXCP_NONE;
892 }
893
read_fcsr(CPURISCVState * env,int csrno,target_ulong * val)894 static RISCVException read_fcsr(CPURISCVState *env, int csrno,
895 target_ulong *val)
896 {
897 *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
898 | (env->frm << FSR_RD_SHIFT);
899 return RISCV_EXCP_NONE;
900 }
901
write_fcsr(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)902 static RISCVException write_fcsr(CPURISCVState *env, int csrno,
903 target_ulong val, uintptr_t ra)
904 {
905 #if !defined(CONFIG_USER_ONLY)
906 if (riscv_has_ext(env, RVF)) {
907 env->mstatus |= MSTATUS_FS;
908 }
909 #endif
910 env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
911 riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
912 return RISCV_EXCP_NONE;
913 }
914
/*
 * Read vtype: the vill flag is reported in the MSB of the current
 * XLEN, ORed with the stored vtype encoding.
 */
static RISCVException read_vtype(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    uint64_t vill;
    switch (env->xl) {
    case MXL_RV32:
        vill = (uint32_t)env->vill << 31;
        break;
    case MXL_RV64:
        vill = (uint64_t)env->vill << 63;
        break;
    default:
        g_assert_not_reached();
    }
    *val = (target_ulong)vill | env->vtype;
    return RISCV_EXCP_NONE;
}
932
/* Read the current vector length. */
static RISCVException read_vl(CPURISCVState *env, int csrno,
                              target_ulong *val)
{
    *val = env->vl;
    return RISCV_EXCP_NONE;
}
939
/* Read vlenb: vector register length in bytes (a config constant). */
static RISCVException read_vlenb(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->vlenb;
    return RISCV_EXCP_NONE;
}
946
/* Read the vector fixed-point rounding mode. */
static RISCVException read_vxrm(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->vxrm;
    return RISCV_EXCP_NONE;
}
953
/* Write the vector fixed-point rounding mode; dirties vector state. */
static RISCVException write_vxrm(CPURISCVState *env, int csrno,
                                 target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxrm = val;
    return RISCV_EXCP_NONE;
}
963
/* Read the vector fixed-point saturation flag (single bit). */
static RISCVException read_vxsat(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vxsat & BIT(0);
    return RISCV_EXCP_NONE;
}
970
/* Write the vector saturation flag (bit 0 only); dirties vector state. */
static RISCVException write_vxsat(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxsat = val & BIT(0);
    return RISCV_EXCP_NONE;
}
980
/* Read the vector start index. */
static RISCVException read_vstart(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstart;
    return RISCV_EXCP_NONE;
}
987
/* Write the vector start index; dirties vector state. */
static RISCVException write_vstart(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    /*
     * The vstart CSR is defined to have only enough writable bits
     * to hold the largest element index, i.e. lg2(VLEN) bits.
     * vlenb << 3 converts bytes to bits (VLEN); ctzl of that gives
     * lg2(VLEN) since VLEN is a power of two.
     */
    env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlenb << 3));
    return RISCV_EXCP_NONE;
}
1001
read_vcsr(CPURISCVState * env,int csrno,target_ulong * val)1002 static RISCVException read_vcsr(CPURISCVState *env, int csrno,
1003 target_ulong *val)
1004 {
1005 *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
1006 return RISCV_EXCP_NONE;
1007 }
1008
write_vcsr(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1009 static RISCVException write_vcsr(CPURISCVState *env, int csrno,
1010 target_ulong val, uintptr_t ra)
1011 {
1012 #if !defined(CONFIG_USER_ONLY)
1013 env->mstatus |= MSTATUS_VS;
1014 #endif
1015 env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
1016 env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
1017 return RISCV_EXCP_NONE;
1018 }
1019
1020 #if defined(CONFIG_USER_ONLY)
1021 /* User Timers and Counters */
get_ticks(bool shift)1022 static target_ulong get_ticks(bool shift)
1023 {
1024 int64_t val = cpu_get_host_ticks();
1025 target_ulong result = shift ? val >> 32 : val;
1026
1027 return result;
1028 }
1029
/* User-only: time CSR reads host ticks directly. */
static RISCVException read_time(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = cpu_get_host_ticks();
    return RISCV_EXCP_NONE;
}
1036
/* User-only: timeh CSR reads the high 32 bits of host ticks. */
static RISCVException read_timeh(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = cpu_get_host_ticks() >> 32;
    return RISCV_EXCP_NONE;
}
1043
/* User-only: hpm counters all report the host tick counter. */
static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = get_ticks(false);
    return RISCV_EXCP_NONE;
}
1050
/* User-only: high halves of the hpm counters. */
static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
                                       target_ulong *val)
{
    *val = get_ticks(true);
    return RISCV_EXCP_NONE;
}
1057
1058 #else /* CONFIG_USER_ONLY */
1059
/* Smcntrpmf: read the cycle-counter privilege filter config. */
static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->mcyclecfg;
    return RISCV_EXCP_NONE;
}
1066
/*
 * Smcntrpmf: write mcyclecfg.  On RV64 the xINH inhibit bits are only
 * writable for privilege modes the CPU actually implements; on RV32
 * the inhibit bits live in mcyclecfgh, so the low half is stored
 * as-is.
 */
static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    uint64_t inh_avail_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->mcyclecfg = val;
    } else {
        /* Set xINH fields if priv mode supported */
        inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MCYCLECFG_BIT_MINH;
        inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFG_BIT_UINH : 0;
        inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFG_BIT_SINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVU)) ? MCYCLECFG_BIT_VUINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVS)) ? MCYCLECFG_BIT_VSINH : 0;
        env->mcyclecfg = val & inh_avail_mask;
    }

    return RISCV_EXCP_NONE;
}
1088
/* Smcntrpmf (RV32): read the high half of mcyclecfg. */
static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->mcyclecfgh;
    return RISCV_EXCP_NONE;
}
1095
/*
 * Smcntrpmf (RV32): write mcyclecfgh.  The xINH inhibit bits are only
 * writable for privilege modes the CPU actually implements.
 */
static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno,
                                       target_ulong val, uintptr_t ra)
{
    target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
                                                  MCYCLECFGH_BIT_MINH);

    /* Set xINH fields if priv mode supported */
    inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFGH_BIT_UINH : 0;
    inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFGH_BIT_SINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVU)) ? MCYCLECFGH_BIT_VUINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVS)) ? MCYCLECFGH_BIT_VSINH : 0;

    env->mcyclecfgh = val & inh_avail_mask;
    return RISCV_EXCP_NONE;
}
1113
/* Read minstretcfg (instret-counter privilege filter config). */
static RISCVException read_minstretcfg(CPURISCVState *env, int csrno,
                                       target_ulong *val)
{
    *val = env->minstretcfg;
    return RISCV_EXCP_NONE;
}
1120
write_minstretcfg(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1121 static RISCVException write_minstretcfg(CPURISCVState *env, int csrno,
1122 target_ulong val, uintptr_t ra)
1123 {
1124 uint64_t inh_avail_mask;
1125
1126 if (riscv_cpu_mxl(env) == MXL_RV32) {
1127 env->minstretcfg = val;
1128 } else {
1129 inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MINSTRETCFG_BIT_MINH;
1130 inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFG_BIT_UINH : 0;
1131 inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFG_BIT_SINH : 0;
1132 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
1133 riscv_has_ext(env, RVU)) ? MINSTRETCFG_BIT_VUINH : 0;
1134 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
1135 riscv_has_ext(env, RVS)) ? MINSTRETCFG_BIT_VSINH : 0;
1136 env->minstretcfg = val & inh_avail_mask;
1137 }
1138 return RISCV_EXCP_NONE;
1139 }
1140
/* Read minstretcfgh (RV32 upper half of the instret filter config). */
static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno,
                                        target_ulong *val)
{
    *val = env->minstretcfgh;
    return RISCV_EXCP_NONE;
}
1147
write_minstretcfgh(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1148 static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno,
1149 target_ulong val, uintptr_t ra)
1150 {
1151 target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
1152 MINSTRETCFGH_BIT_MINH);
1153
1154 inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFGH_BIT_UINH : 0;
1155 inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFGH_BIT_SINH : 0;
1156 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
1157 riscv_has_ext(env, RVU)) ? MINSTRETCFGH_BIT_VUINH : 0;
1158 inh_avail_mask |= (riscv_has_ext(env, RVH) &&
1159 riscv_has_ext(env, RVS)) ? MINSTRETCFGH_BIT_VSINH : 0;
1160
1161 env->minstretcfgh = val & inh_avail_mask;
1162 return RISCV_EXCP_NONE;
1163 }
1164
static RISCVException read_mhpmevent(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    /*
     * mhpmevent3..31 follow mcountinhibit in the CSR address space, so the
     * offset from CSR_MCOUNTINHIBIT is the event index (3..31).
     */
    int evt_index = csrno - CSR_MCOUNTINHIBIT;

    *val = env->mhpmevent_val[evt_index];

    return RISCV_EXCP_NONE;
}
1174
/*
 * Write mhpmeventN and re-derive the counter-to-event mapping from the
 * full 64-bit event value.
 */
static RISCVException write_mhpmevent(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    int evt_index = csrno - CSR_MCOUNTINHIBIT;
    uint64_t mhpmevt_val = val;
    uint64_t inh_avail_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        /* RV32: store the low word; combine with mhpmeventh for the map. */
        env->mhpmevent_val[evt_index] = val;
        mhpmevt_val = mhpmevt_val |
                      ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
    } else {
        /* RV64: xINH filter bits are writable only for existing modes. */
        inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MHPMEVENT_BIT_MINH;
        inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENT_BIT_UINH : 0;
        inh_avail_mask |= riscv_has_ext(env, RVS) ? MHPMEVENT_BIT_SINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVU)) ? MHPMEVENT_BIT_VUINH : 0;
        inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                           riscv_has_ext(env, RVS)) ? MHPMEVENT_BIT_VSINH : 0;
        mhpmevt_val = val & inh_avail_mask;
        env->mhpmevent_val[evt_index] = mhpmevt_val;
    }

    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}
1202
static RISCVException read_mhpmeventh(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    /* mhpmevent3h is the first "h" CSR, hence the +3 index bias. */
    int evt_index = csrno - CSR_MHPMEVENT3H + 3;

    *val = env->mhpmeventh_val[evt_index];

    return RISCV_EXCP_NONE;
}
1212
/*
 * Write mhpmeventNh (RV32): mask the writable xINH bits, store the high
 * word, then refresh the event map with the recombined 64-bit value.
 */
static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
                                       target_ulong val, uintptr_t ra)
{
    int evt_index = csrno - CSR_MHPMEVENT3H + 3;
    uint64_t mhpmevth_val;
    uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];
    target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK |
                                                  MHPMEVENTH_BIT_MINH);

    /* xINH filter bits are writable only for privilege modes that exist. */
    inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENTH_BIT_UINH : 0;
    inh_avail_mask |= riscv_has_ext(env, RVS) ? MHPMEVENTH_BIT_SINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVU)) ? MHPMEVENTH_BIT_VUINH : 0;
    inh_avail_mask |= (riscv_has_ext(env, RVH) &&
                       riscv_has_ext(env, RVS)) ? MHPMEVENTH_BIT_VSINH : 0;

    mhpmevth_val = val & inh_avail_mask;
    mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
    env->mhpmeventh_val[evt_index] = mhpmevth_val;

    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}
1237
/*
 * Compute the current value of a fixed counter (cycle/instret or an hpm
 * counter), honouring the per-privilege-mode inhibit bits of its config
 * register. If no filter bits are set, fall back to the raw global tick
 * source. Returns the low or high word depending on upper_half (RV32).
 */
static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env,
                                                         int counter_idx,
                                                         bool upper_half)
{
    int inst = riscv_pmu_ctr_monitor_instructions(env, counter_idx);
    uint64_t *counter_arr_virt = env->pmu_fixed_ctrs[inst].counter_virt;
    uint64_t *counter_arr = env->pmu_fixed_ctrs[inst].counter;
    target_ulong result = 0;
    uint64_t curr_val = 0;
    uint64_t cfg_val = 0;

    /* Pick the filter config matching this counter: 0=cycle, 2=instret. */
    if (counter_idx == 0) {
        cfg_val = upper_half ? ((uint64_t)env->mcyclecfgh << 32) :
                  env->mcyclecfg;
    } else if (counter_idx == 2) {
        cfg_val = upper_half ? ((uint64_t)env->minstretcfgh << 32) :
                  env->minstretcfg;
    } else {
        cfg_val = upper_half ?
                  ((uint64_t)env->mhpmeventh_val[counter_idx] << 32) :
                  env->mhpmevent_val[counter_idx];
        cfg_val &= MHPMEVENT_FILTER_MASK;
    }

    /* No filtering requested: use the raw tick source directly. */
    if (!cfg_val) {
        if (icount_enabled()) {
            curr_val = inst ? icount_get_raw() : icount_get();
        } else {
            curr_val = cpu_get_host_ticks();
        }

        goto done;
    }

    /* Update counter before reading. */
    riscv_pmu_update_fixed_ctrs(env, env->priv, env->virt_enabled);

    /* Accumulate only the privilege modes whose INH bit is clear. */
    if (!(cfg_val & MCYCLECFG_BIT_MINH)) {
        curr_val += counter_arr[PRV_M];
    }

    if (!(cfg_val & MCYCLECFG_BIT_SINH)) {
        curr_val += counter_arr[PRV_S];
    }

    if (!(cfg_val & MCYCLECFG_BIT_UINH)) {
        curr_val += counter_arr[PRV_U];
    }

    if (!(cfg_val & MCYCLECFG_BIT_VSINH)) {
        curr_val += counter_arr_virt[PRV_S];
    }

    if (!(cfg_val & MCYCLECFG_BIT_VUINH)) {
        curr_val += counter_arr_virt[PRV_U];
    }

done:
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        result = upper_half ? curr_val >> 32 : curr_val;
    } else {
        result = curr_val;
    }

    return result;
}
1304
/*
 * Write the low word of a PMU counter. If the counter is active (not
 * inhibited and monitoring cycles/instructions), snapshot the current
 * reference value so future reads compute the delta from here, and re-arm
 * the overflow timer for programmable counters (idx > 2).
 */
static RISCVException riscv_pmu_write_ctr(CPURISCVState *env, target_ulong val,
                                          uint32_t ctr_idx)
{
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = val;

    counter->mhpmcounter_val = val;
    if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
        (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
         riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
        counter->mhpmcounter_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
                                                                ctr_idx, false);
        if (ctr_idx > 2) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                /* RV32: the timer needs the full 64-bit counter value. */
                mhpmctr_val = mhpmctr_val |
                              ((uint64_t)counter->mhpmcounterh_val << 32);
            }
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        /* Other counters can keep incrementing from the given value */
        counter->mhpmcounter_prev = val;
    }

    return RISCV_EXCP_NONE;
}
1331
/*
 * Write the high word of a PMU counter (RV32). Mirrors riscv_pmu_write_ctr
 * but snapshots the upper-half reference and re-arms the timer with the
 * recombined 64-bit value.
 */
static RISCVException riscv_pmu_write_ctrh(CPURISCVState *env, target_ulong val,
                                           uint32_t ctr_idx)
{
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = counter->mhpmcounter_val;
    uint64_t mhpmctrh_val = val;

    counter->mhpmcounterh_val = val;
    mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
    if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
        (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
         riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
        counter->mhpmcounterh_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
                                                                 ctr_idx, true);
        if (ctr_idx > 2) {
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        /* Inhibited/unmonitored counter: keep counting from this value. */
        counter->mhpmcounterh_prev = val;
    }

    return RISCV_EXCP_NONE;
}
1355
write_mhpmcounter(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1356 static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
1357 target_ulong val, uintptr_t ra)
1358 {
1359 int ctr_idx = csrno - CSR_MCYCLE;
1360
1361 return riscv_pmu_write_ctr(env, val, ctr_idx);
1362 }
1363
write_mhpmcounterh(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)1364 static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
1365 target_ulong val, uintptr_t ra)
1366 {
1367 int ctr_idx = csrno - CSR_MCYCLEH;
1368
1369 return riscv_pmu_write_ctrh(env, val, ctr_idx);
1370 }
1371
/*
 * Read one half of a PMU counter. For active counters the returned value
 * is the stored value plus the advance of the underlying fixed counter
 * since the last snapshot (prev); inhibited or unmonitored counters return
 * the stored value unchanged.
 */
RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
                                  bool upper_half, uint32_t ctr_idx)
{
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    target_ulong ctr_prev = upper_half ? counter->mhpmcounterh_prev :
                                         counter->mhpmcounter_prev;
    target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val :
                                        counter->mhpmcounter_val;

    if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
        /*
         * Counter should not increment if inhibit bit is set. Just return the
         * current counter value.
         */
        *val = ctr_val;
        return RISCV_EXCP_NONE;
    }

    /*
     * The kernel computes the perf delta by subtracting the current value from
     * the value it initialized previously (ctr_val).
     */
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        *val = riscv_pmu_ctr_get_fixed_counters_val(env, ctr_idx, upper_half) -
               ctr_prev + ctr_val;
    } else {
        *val = ctr_val;
    }

    return RISCV_EXCP_NONE;
}
1404
read_hpmcounter(CPURISCVState * env,int csrno,target_ulong * val)1405 static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
1406 target_ulong *val)
1407 {
1408 uint16_t ctr_index;
1409
1410 if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
1411 ctr_index = csrno - CSR_MCYCLE;
1412 } else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
1413 ctr_index = csrno - CSR_CYCLE;
1414 } else {
1415 return RISCV_EXCP_ILLEGAL_INST;
1416 }
1417
1418 return riscv_pmu_read_ctr(env, val, false, ctr_index);
1419 }
1420
read_hpmcounterh(CPURISCVState * env,int csrno,target_ulong * val)1421 static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
1422 target_ulong *val)
1423 {
1424 uint16_t ctr_index;
1425
1426 if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) {
1427 ctr_index = csrno - CSR_MCYCLEH;
1428 } else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) {
1429 ctr_index = csrno - CSR_CYCLEH;
1430 } else {
1431 return RISCV_EXCP_ILLEGAL_INST;
1432 }
1433
1434 return riscv_pmu_read_ctr(env, val, true, ctr_index);
1435 }
1436
/*
 * Counter-delegation read-modify-write of an mhpmcounter low word.
 * Only whole-register accesses (wr_mask of 0 or all-ones) are supported.
 */
static int rmw_cd_mhpmcounter(CPURISCVState *env, int ctr_idx,
                              target_ulong *val, target_ulong new_val,
                              target_ulong wr_mask)
{
    if (wr_mask != 0 && wr_mask != -1) {
        return -EINVAL;
    }

    if (!wr_mask && val) {
        riscv_pmu_read_ctr(env, val, false, ctr_idx);
    } else if (wr_mask) {
        riscv_pmu_write_ctr(env, new_val, ctr_idx);
    } else {
        return -EINVAL;
    }

    return 0;
}
1455
/*
 * Counter-delegation read-modify-write of an mhpmcounter high word (RV32).
 * Only whole-register accesses (wr_mask of 0 or all-ones) are supported.
 */
static int rmw_cd_mhpmcounterh(CPURISCVState *env, int ctr_idx,
                               target_ulong *val, target_ulong new_val,
                               target_ulong wr_mask)
{
    if (wr_mask != 0 && wr_mask != -1) {
        return -EINVAL;
    }

    if (!wr_mask && val) {
        riscv_pmu_read_ctr(env, val, true, ctr_idx);
    } else if (wr_mask) {
        riscv_pmu_write_ctrh(env, new_val, ctr_idx);
    } else {
        return -EINVAL;
    }

    return 0;
}
1474
/*
 * Counter-delegation read-modify-write of mhpmeventN. The M-mode inhibit
 * bit is hidden from reads (when Sscofpmf is present) and protected from
 * writes, then the event map is refreshed.
 */
static int rmw_cd_mhpmevent(CPURISCVState *env, int evt_index,
                            target_ulong *val, target_ulong new_val,
                            target_ulong wr_mask)
{
    uint64_t mhpmevt_val = new_val;

    if (wr_mask != 0 && wr_mask != -1) {
        return -EINVAL;
    }

    if (!wr_mask && val) {
        *val = env->mhpmevent_val[evt_index];
        if (riscv_cpu_cfg(env)->ext_sscofpmf) {
            /* Delegated view must not expose the M-mode inhibit bit. */
            *val &= ~MHPMEVENT_BIT_MINH;
        }
    } else if (wr_mask) {
        /* MINH stays under M-mode control. */
        wr_mask &= ~MHPMEVENT_BIT_MINH;
        mhpmevt_val = (new_val & wr_mask) |
                      (env->mhpmevent_val[evt_index] & ~wr_mask);
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            mhpmevt_val = mhpmevt_val |
                          ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
        }
        env->mhpmevent_val[evt_index] = mhpmevt_val;
        riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
    } else {
        return -EINVAL;
    }

    return 0;
}
1506
/*
 * Counter-delegation read-modify-write of mhpmeventNh (RV32). Hides and
 * protects the high-half MINH bit, then refreshes the event map with the
 * recombined 64-bit value.
 */
static int rmw_cd_mhpmeventh(CPURISCVState *env, int evt_index,
                             target_ulong *val, target_ulong new_val,
                             target_ulong wr_mask)
{
    uint64_t mhpmevth_val;
    uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];

    if (wr_mask != 0 && wr_mask != -1) {
        return -EINVAL;
    }

    if (!wr_mask && val) {
        *val = env->mhpmeventh_val[evt_index];
        if (riscv_cpu_cfg(env)->ext_sscofpmf) {
            *val &= ~MHPMEVENTH_BIT_MINH;
        }
    } else if (wr_mask) {
        wr_mask &= ~MHPMEVENTH_BIT_MINH;
        env->mhpmeventh_val[evt_index] =
            (new_val & wr_mask) | (env->mhpmeventh_val[evt_index] & ~wr_mask);
        mhpmevth_val = env->mhpmeventh_val[evt_index];
        mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
        riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
    } else {
        return -EINVAL;
    }

    return 0;
}
1536
/*
 * Counter-delegation read-modify-write of mcyclecfg (index 0) or
 * minstretcfg (index 2). The M-mode inhibit bit stays under M-mode
 * control: it is masked out of the write mask and hidden from reads.
 *
 * Fixes vs the previous version: the read paths used
 * "*val = env->mcyclecfg &= ~MHPMEVENTH_BIT_MINH;" which (a) destructively
 * cleared bits in the CSR state on a *read* via the compound "&=", and
 * (b) used the high-half event macro instead of this register's own MINH
 * bit, so the wrong bit was cleared. Reads now only mask the returned
 * value, symmetric with the write paths and with rmw_cd_mhpmevent().
 */
static int rmw_cd_ctr_cfg(CPURISCVState *env, int cfg_index, target_ulong *val,
                          target_ulong new_val, target_ulong wr_mask)
{
    switch (cfg_index) {
    case 0: /* CYCLECFG */
        if (wr_mask) {
            wr_mask &= ~MCYCLECFG_BIT_MINH;
            env->mcyclecfg = (new_val & wr_mask) | (env->mcyclecfg & ~wr_mask);
        } else {
            /* Hide MINH from the delegated view without touching state. */
            *val = env->mcyclecfg & ~MCYCLECFG_BIT_MINH;
        }
        break;
    case 2: /* INSTRETCFG */
        if (wr_mask) {
            wr_mask &= ~MINSTRETCFG_BIT_MINH;
            env->minstretcfg = (new_val & wr_mask) |
                               (env->minstretcfg & ~wr_mask);
        } else {
            /* Hide MINH from the delegated view without touching state. */
            *val = env->minstretcfg & ~MINSTRETCFG_BIT_MINH;
        }
        break;
    default:
        return -EINVAL;
    }
    return 0;
}
1563
/*
 * Counter-delegation read-modify-write of mcyclecfgh/minstretcfgh. These
 * registers only exist on RV32; writes keep MINH under M-mode control.
 */
static int rmw_cd_ctr_cfgh(CPURISCVState *env, int cfg_index, target_ulong *val,
                           target_ulong new_val, target_ulong wr_mask)
{

    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    switch (cfg_index) {
    case 0: /* CYCLECFGH */
        if (wr_mask) {
            wr_mask &= ~MCYCLECFGH_BIT_MINH;
            env->mcyclecfgh = (new_val & wr_mask) |
                              (env->mcyclecfgh & ~wr_mask);
        } else {
            *val = env->mcyclecfgh;
        }
        break;
    case 2: /* INSTRETCFGH */
        if (wr_mask) {
            wr_mask &= ~MINSTRETCFGH_BIT_MINH;
            env->minstretcfgh = (new_val & wr_mask) |
                                (env->minstretcfgh & ~wr_mask);
        } else {
            *val = env->minstretcfgh;
        }
        break;
    default:
        return -EINVAL;
    }
    return 0;
}
1596
1597
/*
 * Read scountovf: one bit per hpm counter, set when the counter's event
 * register has its overflow (OF) bit set and the counter is exposed via
 * mcounteren.
 */
static RISCVException read_scountovf(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT;
    int i;
    *val = 0;
    target_ulong *mhpm_evt_val;
    uint64_t of_bit_mask;

    /* Virtualize scountovf for counter delegation */
    if (riscv_cpu_cfg(env)->ext_sscofpmf &&
        riscv_cpu_cfg(env)->ext_ssccfg &&
        get_field(env->menvcfg, MENVCFG_CDE) &&
        env->virt_enabled) {
        return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
    }

    /* On RV32 the OF bit lives in the high-half event registers. */
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpm_evt_val = env->mhpmeventh_val;
        of_bit_mask = MHPMEVENTH_BIT_OF;
    } else {
        mhpm_evt_val = env->mhpmevent_val;
        of_bit_mask = MHPMEVENT_BIT_OF;
    }

    for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) {
        if ((get_field(env->mcounteren, BIT(i))) &&
            (mhpm_evt_val[i] & of_bit_mask)) {
            *val |= BIT(i);
        }
    }

    return RISCV_EXCP_NONE;
}
1632
read_time(CPURISCVState * env,int csrno,target_ulong * val)1633 static RISCVException read_time(CPURISCVState *env, int csrno,
1634 target_ulong *val)
1635 {
1636 uint64_t delta = env->virt_enabled ? env->htimedelta : 0;
1637
1638 if (!env->rdtime_fn) {
1639 return RISCV_EXCP_ILLEGAL_INST;
1640 }
1641
1642 *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
1643 return RISCV_EXCP_NONE;
1644 }
1645
read_timeh(CPURISCVState * env,int csrno,target_ulong * val)1646 static RISCVException read_timeh(CPURISCVState *env, int csrno,
1647 target_ulong *val)
1648 {
1649 uint64_t delta = env->virt_enabled ? env->htimedelta : 0;
1650
1651 if (!env->rdtime_fn) {
1652 return RISCV_EXCP_ILLEGAL_INST;
1653 }
1654
1655 *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
1656 return RISCV_EXCP_NONE;
1657 }
1658
/* Read vstimecmp (low word on RV32, full value on RV64). */
static RISCVException read_vstimecmp(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->vstimecmp;

    return RISCV_EXCP_NONE;
}
1666
/* Read vstimecmph (RV32 upper half of vstimecmp). */
static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->vstimecmp >> 32;

    return RISCV_EXCP_NONE;
}
1674
/*
 * Write vstimecmp (low word on RV32, full value on RV64) and re-arm the
 * VS-level timer for the MIP_VSTIP interrupt.
 */
static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
    } else {
        env->vstimecmp = val;
    }

    riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
                              env->htimedelta, MIP_VSTIP);

    return RISCV_EXCP_NONE;
}
1689
/* Write vstimecmph (RV32 upper half) and re-arm the VS-level timer. */
static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
                                       target_ulong val, uintptr_t ra)
{
    env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
    riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
                              env->htimedelta, MIP_VSTIP);

    return RISCV_EXCP_NONE;
}
1699
read_stimecmp(CPURISCVState * env,int csrno,target_ulong * val)1700 static RISCVException read_stimecmp(CPURISCVState *env, int csrno,
1701 target_ulong *val)
1702 {
1703 if (env->virt_enabled) {
1704 *val = env->vstimecmp;
1705 } else {
1706 *val = env->stimecmp;
1707 }
1708
1709 return RISCV_EXCP_NONE;
1710 }
1711
read_stimecmph(CPURISCVState * env,int csrno,target_ulong * val)1712 static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
1713 target_ulong *val)
1714 {
1715 if (env->virt_enabled) {
1716 *val = env->vstimecmp >> 32;
1717 } else {
1718 *val = env->stimecmp >> 32;
1719 }
1720
1721 return RISCV_EXCP_NONE;
1722 }
1723
/*
 * Write stimecmp. In VS-mode the write is redirected to vstimecmp unless
 * hvictl.VTI forbids it; otherwise the S-level timer is re-armed for
 * MIP_STIP.
 */
static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    if (env->virt_enabled) {
        if (env->hvictl & HVICTL_VTI) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        return write_vstimecmp(env, csrno, val, ra);
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->stimecmp = deposit64(env->stimecmp, 0, 32, (uint64_t)val);
    } else {
        env->stimecmp = val;
    }

    riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);

    return RISCV_EXCP_NONE;
}
1744
/*
 * Write stimecmph (RV32 upper half). Redirects to vstimecmph in VS-mode
 * (unless hvictl.VTI forbids it) and re-arms the S-level timer.
 */
static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    if (env->virt_enabled) {
        if (env->hvictl & HVICTL_VTI) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        return write_vstimecmph(env, csrno, val, ra);
    }

    env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
    riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);

    return RISCV_EXCP_NONE;
}
1760
#define VSTOPI_NUM_SRCS 5

/*
 * All core local interrupts except the fixed ones 0:12. This macro is for
 * virtual interrupts logic so please don't change this to avoid messing up
 * the whole support. For reference see AIA spec: `5.3 Interrupt filtering and
 * virtual interrupts for supervisor level` and `6.3.2 Virtual interrupts for
 * VS level`.
 */
#define LOCAL_INTERRUPTS (~0x1FFFULL)

/* Interrupts that M-mode may delegate to (H)S-mode. */
static const uint64_t delegable_ints =
    S_MODE_INTERRUPTS | VS_MODE_INTERRUPTS | MIP_LCOFIP;
/* Interrupts that HS-mode may further delegate to VS-mode. */
static const uint64_t vs_delegable_ints =
    (VS_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & ~MIP_LCOFIP;
static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
                                 HS_MODE_INTERRUPTS | LOCAL_INTERRUPTS;
/* Exception causes that may be delegated to a lower privilege level. */
#define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
                         (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_U_ECALL)) | \
                         (1ULL << (RISCV_EXCP_S_ECALL)) | \
                         (1ULL << (RISCV_EXCP_VS_ECALL)) | \
                         (1ULL << (RISCV_EXCP_M_ECALL)) | \
                         (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_SW_CHECK)) | \
                         (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
/*
 * Exceptions delegable to VS-mode: everything above except ecalls from
 * S/VS/M and the guest-page-fault / virtual-instruction causes, which
 * must be handled at HS level or above.
 */
static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
    ~((1ULL << (RISCV_EXCP_S_ECALL)) |
      (1ULL << (RISCV_EXCP_VS_ECALL)) |
      (1ULL << (RISCV_EXCP_M_ECALL)) |
      (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
      (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
      (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
      (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
/* sstatus bits visible through the sstatus window (priv spec 1.10+). */
static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
    SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
    SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
1809
1810 /*
1811 * Spec allows for bits 13:63 to be either read-only or writable.
1812 * So far we have interrupt LCOFIP in that region which is writable.
1813 *
1814 * Also, spec allows to inject virtual interrupts in this region even
1815 * without any hardware interrupts for that interrupt number.
1816 *
1817 * For now interrupt in 13:63 region are all kept writable. 13 being
1818 * LCOFIP and 14:63 being virtual only. Change this in future if we
1819 * introduce more interrupts that are not writable.
1820 */
1821
1822 /* Bit STIP can be an alias of mip.STIP that's why it's writable in mvip. */
1823 static const uint64_t mvip_writable_mask = MIP_SSIP | MIP_STIP | MIP_SEIP |
1824 LOCAL_INTERRUPTS;
1825 static const uint64_t mvien_writable_mask = MIP_SSIP | MIP_SEIP |
1826 LOCAL_INTERRUPTS;
1827
1828 static const uint64_t sip_writable_mask = SIP_SSIP | LOCAL_INTERRUPTS;
1829 static const uint64_t hip_writable_mask = MIP_VSSIP;
1830 static const uint64_t hvip_writable_mask = MIP_VSSIP | MIP_VSTIP |
1831 MIP_VSEIP | LOCAL_INTERRUPTS;
1832 static const uint64_t hvien_writable_mask = LOCAL_INTERRUPTS;
1833
1834 static const uint64_t vsip_writable_mask = MIP_VSSIP | LOCAL_INTERRUPTS;
1835
1836 const bool valid_vm_1_10_32[16] = {
1837 [VM_1_10_MBARE] = true,
1838 [VM_1_10_SV32] = true
1839 };
1840
1841 const bool valid_vm_1_10_64[16] = {
1842 [VM_1_10_MBARE] = true,
1843 [VM_1_10_SV39] = true,
1844 [VM_1_10_SV48] = true,
1845 [VM_1_10_SV57] = true
1846 };
1847
1848 /* Machine Information Registers */
/* Generic handler for CSRs that read as zero. */
static RISCVException read_zero(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = 0;
    return RISCV_EXCP_NONE;
}
1855
/* Generic handler for CSRs whose writes are silently discarded. */
static RISCVException write_ignore(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    return RISCV_EXCP_NONE;
}
1861
/* Read mvendorid from the static CPU configuration. */
static RISCVException read_mvendorid(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->mvendorid;
    return RISCV_EXCP_NONE;
}
1868
/* Read marchid from the static CPU configuration. */
static RISCVException read_marchid(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->marchid;
    return RISCV_EXCP_NONE;
}
1875
/* Read mimpid from the static CPU configuration. */
static RISCVException read_mimpid(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->mimpid;
    return RISCV_EXCP_NONE;
}
1882
/* Read mhartid (per-hart identifier assigned at CPU creation). */
static RISCVException read_mhartid(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mhartid;
    return RISCV_EXCP_NONE;
}
1889
1890 /* Machine Trap Setup */
1891
1892 /* We do not store SD explicitly, only compute it on demand. */
/*
 * Derive the SD bit for a status read: SD is set when any of FS/VS/XS is
 * in the Dirty state. SD is not stored, only computed here.
 */
static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
{
    bool dirty = (status & MSTATUS_FS) == MSTATUS_FS ||
                 (status & MSTATUS_VS) == MSTATUS_VS ||
                 (status & MSTATUS_XS) == MSTATUS_XS;

    if (!dirty) {
        return status;
    }

    switch (xl) {
    case MXL_RV32:
        return status | MSTATUS32_SD;
    case MXL_RV64:
        return status | MSTATUS64_SD;
    case MXL_RV128:
        /* For RV128 only the SD bit itself is reported (upper word). */
        return MSTATUSH128_SD;
    default:
        g_assert_not_reached();
    }
}
1911
/* Read mstatus, computing the SD summary bit on the fly. */
static RISCVException read_mstatus(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
    return RISCV_EXCP_NONE;
}
1918
validate_vm(CPURISCVState * env,target_ulong vm)1919 static bool validate_vm(CPURISCVState *env, target_ulong vm)
1920 {
1921 bool rv32 = riscv_cpu_mxl(env) == MXL_RV32;
1922 RISCVCPU *cpu = env_archcpu(env);
1923 int satp_mode_supported_max = cpu->cfg.max_satp_mode;
1924 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
1925
1926 assert(satp_mode_supported_max >= 0);
1927 return vm <= satp_mode_supported_max && valid_vm[vm];
1928 }
1929
/*
 * Legalize a write to an address-translation CSR (satp-style register):
 * accept the new value only if its MODE is supported and some relevant
 * field actually changes; otherwise keep the old value (WARL behavior).
 */
static target_ulong legalize_xatp(CPURISCVState *env, target_ulong old_xatp,
                                  target_ulong val)
{
    target_ulong mask;
    bool vm;
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        vm = validate_vm(env, get_field(val, SATP32_MODE));
        mask = (val ^ old_xatp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
    } else {
        vm = validate_vm(env, get_field(val, SATP64_MODE));
        mask = (val ^ old_xatp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
    }

    if (vm && mask) {
        /*
         * The ISA defines SATP.MODE=Bare as "no translation", but we still
         * pass these through QEMU's TLB emulation as it improves
         * performance. Flushing the TLB on SATP writes with paging
         * enabled avoids leaking those invalid cached mappings.
         */
        tlb_flush(env_cpu(env));
        return val;
    }
    return old_xatp;
}
1955
/*
 * Legalize the MPP field of an mstatus write (WARL): if the requested
 * privilege mode is not implemented on this CPU, keep the previous MPP.
 */
static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp,
                                 target_ulong val)
{
    target_ulong new_mpp = get_field(val, MSTATUS_MPP);
    bool valid;

    switch (new_mpp) {
    case PRV_M:
        valid = true;
        break;
    case PRV_S:
        valid = riscv_has_ext(env, RVS);
        break;
    case PRV_U:
        valid = riscv_has_ext(env, RVU);
        break;
    default:
        valid = false;
        break;
    }

    return valid ? val : set_field(val, MSTATUS_MPP, old_mpp);
}
1981
/*
 * Write mstatus. Builds a writable-bit mask from the enabled extensions,
 * legalizes WARL fields (MPP, UXL), applies the double-trap side effects
 * and flushes the TLB when translation-affecting bits change.
 */
static RISCVException write_mstatus(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    uint64_t mstatus = env->mstatus;
    uint64_t mask = 0;
    RISCVMXL xl = riscv_cpu_mxl(env);

    /*
     * MPP field have been made WARL since priv version 1.11. However,
     * legalization for it will not break any software running on 1.10.
     */
    val = legalize_mpp(env, get_field(mstatus, MSTATUS_MPP), val);

    /* flush tlb on mstatus fields that affect VM */
    if ((val ^ mstatus) & MSTATUS_MXR) {
        tlb_flush(env_cpu(env));
    }
    /* Bits writable regardless of optional extensions. */
    mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
           MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
           MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
           MSTATUS_TW;

    if (riscv_has_ext(env, RVF)) {
        mask |= MSTATUS_FS;
    }

    if (riscv_cpu_cfg(env)->ext_zve32x) {
        mask |= MSTATUS_VS;
    }

    /* S-mode double trap: setting SDT also clears SIE. */
    if (riscv_env_smode_dbltrp_enabled(env, env->virt_enabled)) {
        mask |= MSTATUS_SDT;
        if ((val & MSTATUS_SDT) != 0) {
            val &= ~MSTATUS_SIE;
        }
    }

    /* M-mode double trap: setting MDT also clears MIE. */
    if (riscv_cpu_cfg(env)->ext_smdbltrp) {
        mask |= MSTATUS_MDT;
        if ((val & MSTATUS_MDT) != 0) {
            val &= ~MSTATUS_MIE;
        }
    }

    if (xl != MXL_RV32 || env->debugger) {
        if (riscv_has_ext(env, RVH)) {
            mask |= MSTATUS_MPV | MSTATUS_GVA;
        }
        /* UXL is WARL: only accept the write when a non-zero UXL is given. */
        if ((val & MSTATUS64_UXL) != 0) {
            mask |= MSTATUS64_UXL;
        }
    }

    /* If cfi lp extension is available, then apply cfi lp mask */
    if (env_archcpu(env)->cfg.ext_zicfilp) {
        mask |= (MSTATUS_MPELP | MSTATUS_SPELP);
    }

    mstatus = (mstatus & ~mask) | (val & mask);

    env->mstatus = mstatus;

    /*
     * Except in debug mode, UXL/SXL can only be modified by higher
     * privilege mode. So xl will not be changed in normal mode.
     */
    if (env->debugger) {
        env->xl = cpu_recompute_xl(env);
    }

    return RISCV_EXCP_NONE;
}
2054
read_mstatush(CPURISCVState * env,int csrno,target_ulong * val)2055 static RISCVException read_mstatush(CPURISCVState *env, int csrno,
2056 target_ulong *val)
2057 {
2058 *val = env->mstatus >> 32;
2059 return RISCV_EXCP_NONE;
2060 }
2061
/* Write the RV32 mstatush CSR (upper 32 bits of the 64-bit mstatus). */
static RISCVException write_mstatush(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    uint64_t valh = (uint64_t)val << 32;
    uint64_t mask = riscv_has_ext(env, RVH) ? MSTATUS_MPV | MSTATUS_GVA : 0;

    if (riscv_cpu_cfg(env)->ext_smdbltrp) {
        mask |= MSTATUS_MDT;
        if ((valh & MSTATUS_MDT) != 0) {
            /*
             * Setting MDT must clear MIE.  MSTATUS_MIE lives in the low
             * word, so the corresponding bit of valh is zero; widening the
             * mask to include MIE therefore clears it in the merge below.
             */
            mask |= MSTATUS_MIE;
        }
    }
    env->mstatus = (env->mstatus & ~mask) | (valh & mask);

    return RISCV_EXCP_NONE;
}
2078
read_mstatus_i128(CPURISCVState * env,int csrno,Int128 * val)2079 static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
2080 Int128 *val)
2081 {
2082 *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128,
2083 env->mstatus));
2084 return RISCV_EXCP_NONE;
2085 }
2086
read_misa_i128(CPURISCVState * env,int csrno,Int128 * val)2087 static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
2088 Int128 *val)
2089 {
2090 *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
2091 return RISCV_EXCP_NONE;
2092 }
2093
read_misa(CPURISCVState * env,int csrno,target_ulong * val)2094 static RISCVException read_misa(CPURISCVState *env, int csrno,
2095 target_ulong *val)
2096 {
2097 target_ulong misa;
2098
2099 switch (env->misa_mxl) {
2100 case MXL_RV32:
2101 misa = (target_ulong)MXL_RV32 << 30;
2102 break;
2103 #ifdef TARGET_RISCV64
2104 case MXL_RV64:
2105 misa = (target_ulong)MXL_RV64 << 62;
2106 break;
2107 #endif
2108 default:
2109 g_assert_not_reached();
2110 }
2111
2112 *val = misa | env->misa_ext;
2113 return RISCV_EXCP_NONE;
2114 }
2115
get_next_pc(CPURISCVState * env,uintptr_t ra)2116 static target_ulong get_next_pc(CPURISCVState *env, uintptr_t ra)
2117 {
2118 uint64_t data[INSN_START_WORDS];
2119
2120 /* Outside of a running cpu, env contains the next pc. */
2121 if (ra == 0 || !cpu_unwind_state_data(env_cpu(env), ra, data)) {
2122 return env->pc;
2123 }
2124
2125 /* Within unwind data, [0] is pc and [1] is the opcode. */
2126 return data[0] + insn_len(data[1]);
2127 }
2128
/*
 * Write misa.  The new extension set is legalized (unsupported bits
 * dropped, C suppressed when the next pc is unaligned, G cleared when a
 * dependency is off), then validated; an invalid combination rolls back
 * to the previous value without raising an exception.
 */
static RISCVException write_misa(CPURISCVState *env, int csrno,
                                 target_ulong val, uintptr_t ra)
{
    RISCVCPU *cpu = env_archcpu(env);
    uint32_t orig_misa_ext = env->misa_ext;
    Error *local_err = NULL;

    if (!riscv_cpu_cfg(env)->misa_w) {
        /* drop write to misa */
        return RISCV_EXCP_NONE;
    }

    /* Mask extensions that are not supported by this hart */
    val &= env->misa_ext_mask;

    /* Suppress 'C' if next instruction is not aligned. */
    if ((val & RVC) && (get_next_pc(env, ra) & 3) != 0) {
        val &= ~RVC;
    }

    /* Disable RVG if any of its dependencies are disabled */
    if (!(val & RVI && val & RVM && val & RVA &&
          val & RVF && val & RVD)) {
        val &= ~RVG;
    }

    /* If nothing changed, do nothing. */
    if (val == env->misa_ext) {
        return RISCV_EXCP_NONE;
    }

    env->misa_ext = val;
    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        /* Rollback on validation error */
        qemu_log_mask(LOG_GUEST_ERROR, "Unable to write MISA ext value "
                      "0x%x, keeping existing MISA ext 0x%x\n",
                      env->misa_ext, orig_misa_ext);

        env->misa_ext = orig_misa_ext;

        return RISCV_EXCP_NONE;
    }

    /* Without F, the FS field must read as Off. */
    if (!(env->misa_ext & RVF)) {
        env->mstatus &= ~MSTATUS_FS;
    }

    /* flush translation cache */
    tb_flush(env_cpu(env));
    env->xl = riscv_cpu_mxl(env);
    return RISCV_EXCP_NONE;
}
2182
read_medeleg(CPURISCVState * env,int csrno,target_ulong * val)2183 static RISCVException read_medeleg(CPURISCVState *env, int csrno,
2184 target_ulong *val)
2185 {
2186 *val = env->medeleg;
2187 return RISCV_EXCP_NONE;
2188 }
2189
write_medeleg(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)2190 static RISCVException write_medeleg(CPURISCVState *env, int csrno,
2191 target_ulong val, uintptr_t ra)
2192 {
2193 env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
2194 return RISCV_EXCP_NONE;
2195 }
2196
rmw_mideleg64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)2197 static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno,
2198 uint64_t *ret_val,
2199 uint64_t new_val, uint64_t wr_mask)
2200 {
2201 uint64_t mask = wr_mask & delegable_ints;
2202
2203 if (ret_val) {
2204 *ret_val = env->mideleg;
2205 }
2206
2207 env->mideleg = (env->mideleg & ~mask) | (new_val & mask);
2208
2209 if (riscv_has_ext(env, RVH)) {
2210 env->mideleg |= HS_MODE_INTERRUPTS;
2211 }
2212
2213 return RISCV_EXCP_NONE;
2214 }
2215
rmw_mideleg(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2216 static RISCVException rmw_mideleg(CPURISCVState *env, int csrno,
2217 target_ulong *ret_val,
2218 target_ulong new_val, target_ulong wr_mask)
2219 {
2220 uint64_t rval;
2221 RISCVException ret;
2222
2223 ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask);
2224 if (ret_val) {
2225 *ret_val = rval;
2226 }
2227
2228 return ret;
2229 }
2230
rmw_midelegh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2231 static RISCVException rmw_midelegh(CPURISCVState *env, int csrno,
2232 target_ulong *ret_val,
2233 target_ulong new_val,
2234 target_ulong wr_mask)
2235 {
2236 uint64_t rval;
2237 RISCVException ret;
2238
2239 ret = rmw_mideleg64(env, csrno, &rval,
2240 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2241 if (ret_val) {
2242 *ret_val = rval >> 32;
2243 }
2244
2245 return ret;
2246 }
2247
rmw_mie64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)2248 static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
2249 uint64_t *ret_val,
2250 uint64_t new_val, uint64_t wr_mask)
2251 {
2252 uint64_t mask = wr_mask & all_ints;
2253
2254 if (ret_val) {
2255 *ret_val = env->mie;
2256 }
2257
2258 env->mie = (env->mie & ~mask) | (new_val & mask);
2259
2260 if (!riscv_has_ext(env, RVH)) {
2261 env->mie &= ~((uint64_t)HS_MODE_INTERRUPTS);
2262 }
2263
2264 return RISCV_EXCP_NONE;
2265 }
2266
rmw_mie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2267 static RISCVException rmw_mie(CPURISCVState *env, int csrno,
2268 target_ulong *ret_val,
2269 target_ulong new_val, target_ulong wr_mask)
2270 {
2271 uint64_t rval;
2272 RISCVException ret;
2273
2274 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask);
2275 if (ret_val) {
2276 *ret_val = rval;
2277 }
2278
2279 return ret;
2280 }
2281
rmw_mieh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2282 static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
2283 target_ulong *ret_val,
2284 target_ulong new_val, target_ulong wr_mask)
2285 {
2286 uint64_t rval;
2287 RISCVException ret;
2288
2289 ret = rmw_mie64(env, csrno, &rval,
2290 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2291 if (ret_val) {
2292 *ret_val = rval >> 32;
2293 }
2294
2295 return ret;
2296 }
2297
rmw_mvien64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)2298 static RISCVException rmw_mvien64(CPURISCVState *env, int csrno,
2299 uint64_t *ret_val,
2300 uint64_t new_val, uint64_t wr_mask)
2301 {
2302 uint64_t mask = wr_mask & mvien_writable_mask;
2303
2304 if (ret_val) {
2305 *ret_val = env->mvien;
2306 }
2307
2308 env->mvien = (env->mvien & ~mask) | (new_val & mask);
2309
2310 return RISCV_EXCP_NONE;
2311 }
2312
rmw_mvien(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2313 static RISCVException rmw_mvien(CPURISCVState *env, int csrno,
2314 target_ulong *ret_val,
2315 target_ulong new_val, target_ulong wr_mask)
2316 {
2317 uint64_t rval;
2318 RISCVException ret;
2319
2320 ret = rmw_mvien64(env, csrno, &rval, new_val, wr_mask);
2321 if (ret_val) {
2322 *ret_val = rval;
2323 }
2324
2325 return ret;
2326 }
2327
rmw_mvienh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)2328 static RISCVException rmw_mvienh(CPURISCVState *env, int csrno,
2329 target_ulong *ret_val,
2330 target_ulong new_val, target_ulong wr_mask)
2331 {
2332 uint64_t rval;
2333 RISCVException ret;
2334
2335 ret = rmw_mvien64(env, csrno, &rval,
2336 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2337 if (ret_val) {
2338 *ret_val = rval >> 32;
2339 }
2340
2341 return ret;
2342 }
2343
read_mtopi(CPURISCVState * env,int csrno,target_ulong * val)2344 static RISCVException read_mtopi(CPURISCVState *env, int csrno,
2345 target_ulong *val)
2346 {
2347 int irq;
2348 uint8_t iprio;
2349
2350 irq = riscv_cpu_mirq_pending(env);
2351 if (irq <= 0 || irq > 63) {
2352 *val = 0;
2353 } else {
2354 iprio = env->miprio[irq];
2355 if (!iprio) {
2356 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) {
2357 iprio = IPRIO_MMAXIPRIO;
2358 }
2359 }
2360 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
2361 *val |= iprio;
2362 }
2363
2364 return RISCV_EXCP_NONE;
2365 }
2366
/* Map an AIA S-mode CSR number to its VS alias when V=1. */
static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
{
    if (!env->virt_enabled) {
        return csrno;
    }

    if (csrno == CSR_SISELECT) {
        return CSR_VSISELECT;
    }
    if (csrno == CSR_SIREG) {
        return CSR_VSIREG;
    }
    if (csrno == CSR_STOPEI) {
        return CSR_VSTOPEI;
    }

    return csrno;
}
2384
/*
 * Map an indirect-access S-mode CSR number to its VS alias when V=1.
 * sireg..sireg6 translate by a fixed offset onto vsireg..vsireg6; the
 * CSR numbers are enumerated explicitly because there is a hole in the
 * numbering between sireg3 and sireg4.
 */
static int csrind_xlate_vs_csrno(CPURISCVState *env, int csrno)
{
    if (!env->virt_enabled) {
        return csrno;
    }

    if (csrno == CSR_SISELECT) {
        return CSR_VSISELECT;
    }

    if (csrno == CSR_SIREG  || csrno == CSR_SIREG2 ||
        csrno == CSR_SIREG3 || csrno == CSR_SIREG4 ||
        csrno == CSR_SIREG5 || csrno == CSR_SIREG6) {
        return CSR_VSIREG + (csrno - CSR_SIREG);
    }

    return csrno;
}
2405
/*
 * Read-modify-write of miselect/siselect/vsiselect.  The writable range
 * of the selector depends on whether the csrind extensions or only AIA
 * are implemented.
 */
static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
                                   target_ulong *val, target_ulong new_val,
                                   target_ulong wr_mask)
{
    target_ulong *iselect;
    int ret;

    /* Smstateen can forbid indirect CSR access below M-mode. */
    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* Translate CSR number for VS-mode */
    csrno = csrind_xlate_vs_csrno(env, csrno);

    /* Find the iselect CSR based on CSR number */
    switch (csrno) {
    case CSR_MISELECT:
        iselect = &env->miselect;
        break;
    case CSR_SISELECT:
        iselect = &env->siselect;
        break;
    case CSR_VSISELECT:
        iselect = &env->vsiselect;
        break;
    default:
        return RISCV_EXCP_ILLEGAL_INST;
    };

    if (val) {
        *val = *iselect;
    }

    /* csrind widens the legal selector range beyond AIA's. */
    if (riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind) {
        wr_mask &= ISELECT_MASK_SXCSRIND;
    } else {
        wr_mask &= ISELECT_MASK_AIA;
    }

    if (wr_mask) {
        *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
    }

    return RISCV_EXCP_NONE;
}
2452
xiselect_aia_range(target_ulong isel)2453 static bool xiselect_aia_range(target_ulong isel)
2454 {
2455 return (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) ||
2456 (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST);
2457 }
2458
xiselect_cd_range(target_ulong isel)2459 static bool xiselect_cd_range(target_ulong isel)
2460 {
2461 return (ISELECT_CD_FIRST <= isel && isel <= ISELECT_CD_LAST);
2462 }
2463
xiselect_ctr_range(int csrno,target_ulong isel)2464 static bool xiselect_ctr_range(int csrno, target_ulong isel)
2465 {
2466 /* MIREG-MIREG6 for the range 0x200-0x2ff are not used by CTR. */
2467 return CTR_ENTRIES_FIRST <= isel && isel <= CTR_ENTRIES_LAST &&
2468 csrno < CSR_MIREG;
2469 }
2470
/*
 * Read-modify-write one iprio array register.  Each register packs
 * 4 (RV32) or 8 (RV64) per-IRQ priority bytes; ext_irq_no identifies the
 * external-interrupt slot, which is read-only zero.
 */
static int rmw_iprio(target_ulong xlen,
                     target_ulong iselect, uint8_t *iprio,
                     target_ulong *val, target_ulong new_val,
                     target_ulong wr_mask, int ext_irq_no)
{
    int idx, first_irq, irqs_per_reg;
    target_ulong cur;

    if (iselect < ISELECT_IPRIO0 || iselect > ISELECT_IPRIO15) {
        return -EINVAL;
    }
    /* On RV64 the odd-numbered iprio registers do not exist. */
    if (xlen != 32 && (iselect & 0x1)) {
        return -EINVAL;
    }

    irqs_per_reg = 4 * (xlen / 32);
    first_irq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * irqs_per_reg;

    /* Gather the current priority bytes into one register-sized value. */
    cur = 0;
    for (idx = 0; idx < irqs_per_reg; idx++) {
        cur |= ((target_ulong)iprio[first_irq + idx]) <<
               (IPRIO_IRQ_BITS * idx);
    }

    if (val) {
        *val = cur;
    }

    if (wr_mask) {
        target_ulong merged = (cur & ~wr_mask) | (new_val & wr_mask);

        for (idx = 0; idx < irqs_per_reg; idx++) {
            /*
             * M-level and S-level external IRQ priority always read-only
             * zero, so the default priority order is always preferred for
             * those interrupts.
             */
            if (first_irq + idx == ext_irq_no) {
                continue;
            }
            iprio[first_irq + idx] =
                (merged >> (IPRIO_IRQ_BITS * idx)) & 0xff;
        }
    }

    return 0;
}
2515
/*
 * Read-modify-write a CTR source entry.  The CTR arrays are circular
 * buffers: the write pointer (TOS) names the next empty slot, so TOS - 1
 * holds the most recent record and logical entry 0 maps onto it.  Walking
 * entries therefore goes backwards from the write pointer modulo the
 * configured depth.
 */
static int rmw_ctrsource(CPURISCVState *env, int isel, target_ulong *val,
                         target_ulong new_val, target_ulong wr_mask)
{
    const uint64_t entry = isel - CTR_ENTRIES_FIRST;
    const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
    uint64_t slot;

    /* Entries beyond depth-1 read as zero and ignore writes. */
    if (entry >= depth) {
        if (val) {
            *val = 0;
        }
        return 0;
    }

    slot = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
    slot = (slot - entry - 1) & (depth - 1);

    if (val) {
        *val = env->ctr_src[slot];
    }

    env->ctr_src[slot] = (env->ctr_src[slot] & ~wr_mask) |
                         (new_val & wr_mask);

    return 0;
}
2554
/*
 * Read-modify-write a CTR target entry.  Same circular-buffer layout as
 * rmw_ctrsource: logical entry 0 is the slot just before the write
 * pointer, and indexing wraps modulo the configured depth.
 */
static int rmw_ctrtarget(CPURISCVState *env, int isel, target_ulong *val,
                         target_ulong new_val, target_ulong wr_mask)
{
    const uint64_t entry = isel - CTR_ENTRIES_FIRST;
    const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
    uint64_t slot;

    /* Entries beyond depth-1 read as zero and ignore writes. */
    if (entry >= depth) {
        if (val) {
            *val = 0;
        }
        return 0;
    }

    slot = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
    slot = (slot - entry - 1) & (depth - 1);

    if (val) {
        *val = env->ctr_dst[slot];
    }

    env->ctr_dst[slot] = (env->ctr_dst[slot] & ~wr_mask) |
                         (new_val & wr_mask);

    return 0;
}
2593
/*
 * Read-modify-write a CTR data entry.  Same circular-buffer layout as
 * rmw_ctrsource; writes are additionally restricted to CTRDATA_MASK.
 */
static int rmw_ctrdata(CPURISCVState *env, int isel, target_ulong *val,
                       target_ulong new_val, target_ulong wr_mask)
{
    const uint64_t entry = isel - CTR_ENTRIES_FIRST;
    const uint64_t mask = wr_mask & CTRDATA_MASK;
    const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
    uint64_t slot;

    /* Entries beyond depth-1 read as zero and ignore writes. */
    if (entry >= depth) {
        if (val) {
            *val = 0;
        }
        return 0;
    }

    slot = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
    slot = (slot - entry - 1) & (depth - 1);

    if (val) {
        *val = env->ctr_data[slot];
    }

    env->ctr_data[slot] = (env->ctr_data[slot] & ~mask) | (new_val & mask);

    return 0;
}
2633
/*
 * AIA portion of indirect register access: dispatch an xireg access to
 * either the iprio arrays or machine-provided IMSIC emulation, based on
 * the selector value.  Raises a virtual-instruction or illegal-instruction
 * exception on failure, depending on the access context.
 */
static RISCVException rmw_xireg_aia(CPURISCVState *env, int csrno,
                                    target_ulong isel, target_ulong *val,
                                    target_ulong new_val, target_ulong wr_mask)
{
    bool virt = false, isel_reserved = false;
    int ret = -EINVAL;
    uint8_t *iprio;
    target_ulong priv, vgein;

    /* VS-mode CSR number passed in has already been translated */
    switch (csrno) {
    case CSR_MIREG:
        if (!riscv_cpu_cfg(env)->ext_smaia) {
            goto done;
        }
        iprio = env->miprio;
        priv = PRV_M;
        break;
    case CSR_SIREG:
        /*
         * When mvien delegates SEIP, S-mode must not reach the IMSIC
         * interrupt-file selectors directly.
         */
        if (!riscv_cpu_cfg(env)->ext_ssaia ||
            (env->priv == PRV_S && env->mvien & MIP_SEIP &&
             env->siselect >= ISELECT_IMSIC_EIDELIVERY &&
             env->siselect <= ISELECT_IMSIC_EIE63)) {
            goto done;
        }
        iprio = env->siprio;
        priv = PRV_S;
        break;
    case CSR_VSIREG:
        if (!riscv_cpu_cfg(env)->ext_ssaia) {
            goto done;
        }
        iprio = env->hviprio;
        priv = PRV_S;
        virt = true;
        break;
    default:
        goto done;
    };

    /* Find the selected guest interrupt file */
    vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;

    if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) {
        /* Local interrupt priority registers not available for VS-mode */
        if (!virt) {
            ret = rmw_iprio(riscv_cpu_mxl_bits(env),
                            isel, iprio, val, new_val, wr_mask,
                            (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT);
        }
    } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) {
        /* IMSIC registers only available when machine implements it. */
        if (env->aia_ireg_rmw_fn[priv]) {
            /* Selected guest interrupt file should not be zero */
            if (virt && (!vgein || env->geilen < vgein)) {
                goto done;
            }
            /* Call machine specific IMSIC register emulation */
            ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
                                             AIA_MAKE_IREG(isel, priv, virt, vgein,
                                                           riscv_cpu_mxl_bits(env)),
                                             val, new_val, wr_mask);
        }
    } else {
        isel_reserved = true;
    }

 done:
    /*
     * If AIA is not enabled, illegal instruction exception is always
     * returned regardless of whether we are in VS-mode or not
     */
    if (ret) {
        return (env->virt_enabled && virt && !isel_reserved) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}
2713
/*
 * Counter-delegation (Smcdeleg/Ssccfg) portion of indirect register
 * access: route a sireg* access onto the delegated hpm counter, counter
 * configuration, or event CSRs identified by the selector.
 */
static int rmw_xireg_cd(CPURISCVState *env, int csrno,
                        target_ulong isel, target_ulong *val,
                        target_ulong new_val, target_ulong wr_mask)
{
    int ret = -EINVAL;
    int ctr_index = isel - ISELECT_CD_FIRST;
    /* Selectors below this point map cycle/time/instret; above, hpm3..31. */
    int isel_hpm_start = ISELECT_CD_FIRST + 3;

    if (!riscv_cpu_cfg(env)->ext_smcdeleg || !riscv_cpu_cfg(env)->ext_ssccfg) {
        ret = RISCV_EXCP_ILLEGAL_INST;
        goto done;
    }

    /* Invalid siselect value for reserved */
    if (ctr_index == 1) {
        goto done;
    }

    /* sireg4 and sireg5 provide access to RV32-only CSRs */
    if (((csrno == CSR_SIREG5) || (csrno == CSR_SIREG4)) &&
        (riscv_cpu_mxl(env) != MXL_RV32)) {
        ret = RISCV_EXCP_ILLEGAL_INST;
        goto done;
    }

    /* Check Sscofpmf dependency */
    if (!riscv_cpu_cfg(env)->ext_sscofpmf && csrno == CSR_SIREG5 &&
        (isel_hpm_start <= isel && isel <= ISELECT_CD_LAST)) {
        goto done;
    }

    /* Check Smcntrpmf dependency */
    if (!riscv_cpu_cfg(env)->ext_smcntrpmf &&
        (csrno == CSR_SIREG2 || csrno == CSR_SIREG5) &&
        (ISELECT_CD_FIRST <= isel && isel < isel_hpm_start)) {
        goto done;
    }

    /* The counter must be both enabled for S-mode and delegated. */
    if (!get_field(env->mcounteren, BIT(ctr_index)) ||
        !get_field(env->menvcfg, MENVCFG_CDE)) {
        goto done;
    }

    switch (csrno) {
    case CSR_SIREG:
        ret = rmw_cd_mhpmcounter(env, ctr_index, val, new_val, wr_mask);
        break;
    case CSR_SIREG4:
        ret = rmw_cd_mhpmcounterh(env, ctr_index, val, new_val, wr_mask);
        break;
    case CSR_SIREG2:
        /* Indices 0..2 are cycle/time/instret configuration registers. */
        if (ctr_index <= 2) {
            ret = rmw_cd_ctr_cfg(env, ctr_index, val, new_val, wr_mask);
        } else {
            ret = rmw_cd_mhpmevent(env, ctr_index, val, new_val, wr_mask);
        }
        break;
    case CSR_SIREG5:
        if (ctr_index <= 2) {
            ret = rmw_cd_ctr_cfgh(env, ctr_index, val, new_val, wr_mask);
        } else {
            ret = rmw_cd_mhpmeventh(env, ctr_index, val, new_val, wr_mask);
        }
        break;
    default:
        goto done;
    }

 done:
    return ret;
}
2785
/*
 * CTR portion of indirect register access: xireg maps to the source
 * array, xireg2 to the target array and xireg3 to the data array;
 * remaining xireg CSRs read as zero.
 */
static int rmw_xireg_ctr(CPURISCVState *env, int csrno,
                         target_ulong isel, target_ulong *val,
                         target_ulong new_val, target_ulong wr_mask)
{
    if (!riscv_cpu_cfg(env)->ext_smctr && !riscv_cpu_cfg(env)->ext_ssctr) {
        return -EINVAL;
    }

    switch (csrno) {
    case CSR_SIREG:
    case CSR_VSIREG:
        return rmw_ctrsource(env, isel, val, new_val, wr_mask);
    case CSR_SIREG2:
    case CSR_VSIREG2:
        return rmw_ctrtarget(env, isel, val, new_val, wr_mask);
    case CSR_SIREG3:
    case CSR_VSIREG3:
        return rmw_ctrdata(env, isel, val, new_val, wr_mask);
    default:
        if (val) {
            *val = 0;
        }
        return 0;
    }
}
2806
2807 /*
2808 * rmw_xireg_csrind: Perform indirect access to xireg and xireg2-xireg6
2809 *
2810 * Perform indirect access to xireg and xireg2-xireg6.
2811 * This is a generic interface for all xireg CSRs. Apart from AIA, all other
2812 * extension using csrind should be implemented here.
2813 */
rmw_xireg_csrind(CPURISCVState * env,int csrno,target_ulong isel,target_ulong * val,target_ulong new_val,target_ulong wr_mask)2814 static int rmw_xireg_csrind(CPURISCVState *env, int csrno,
2815 target_ulong isel, target_ulong *val,
2816 target_ulong new_val, target_ulong wr_mask)
2817 {
2818 bool virt = csrno == CSR_VSIREG ? true : false;
2819 int ret = -EINVAL;
2820
2821 if (xiselect_cd_range(isel)) {
2822 ret = rmw_xireg_cd(env, csrno, isel, val, new_val, wr_mask);
2823 } else if (xiselect_ctr_range(csrno, isel)) {
2824 ret = rmw_xireg_ctr(env, csrno, isel, val, new_val, wr_mask);
2825 } else {
2826 /*
2827 * As per the specification, access to unimplented region is undefined
2828 * but recommendation is to raise illegal instruction exception.
2829 */
2830 return RISCV_EXCP_ILLEGAL_INST;
2831 }
2832
2833 if (ret) {
2834 return (env->virt_enabled && virt) ?
2835 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
2836 }
2837
2838 return RISCV_EXCP_NONE;
2839 }
2840
/*
 * Indirect access through xireg..xireg6: pick the iselect value that
 * corresponds to the CSR number and forward to the csrind handler.
 */
static int rmw_xiregi(CPURISCVState *env, int csrno, target_ulong *val,
                      target_ulong new_val, target_ulong wr_mask)
{
    target_ulong isel;
    int ret;

    /* Smstateen can forbid indirect CSR access below M-mode. */
    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* Translate CSR number for VS-mode */
    csrno = csrind_xlate_vs_csrno(env, csrno);

    /*
     * The CSR number xireg4 - 1 is a hole in the numbering between
     * xireg3 and xireg4, hence the explicit exclusion in each range.
     */
    if (CSR_MIREG <= csrno && csrno <= CSR_MIREG6 &&
        csrno != CSR_MIREG4 - 1) {
        isel = env->miselect;
    } else if (CSR_SIREG <= csrno && csrno <= CSR_SIREG6 &&
               csrno != CSR_SIREG4 - 1) {
        isel = env->siselect;
    } else if (CSR_VSIREG <= csrno && csrno <= CSR_VSIREG6 &&
               csrno != CSR_VSIREG4 - 1) {
        isel = env->vsiselect;
    } else {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
}
2870
rmw_xireg(CPURISCVState * env,int csrno,target_ulong * val,target_ulong new_val,target_ulong wr_mask)2871 static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
2872 target_ulong *val, target_ulong new_val,
2873 target_ulong wr_mask)
2874 {
2875 int ret = -EINVAL;
2876 target_ulong isel;
2877
2878 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
2879 if (ret != RISCV_EXCP_NONE) {
2880 return ret;
2881 }
2882
2883 /* Translate CSR number for VS-mode */
2884 csrno = csrind_xlate_vs_csrno(env, csrno);
2885
2886 /* Decode register details from CSR number */
2887 switch (csrno) {
2888 case CSR_MIREG:
2889 isel = env->miselect;
2890 break;
2891 case CSR_SIREG:
2892 isel = env->siselect;
2893 break;
2894 case CSR_VSIREG:
2895 isel = env->vsiselect;
2896 break;
2897 default:
2898 goto done;
2899 };
2900
2901 /*
2902 * Use the xiselect range to determine actual op on xireg.
2903 *
2904 * Since we only checked the existence of AIA or Indirect Access in the
2905 * predicate, we should check the existence of the exact extension when
2906 * we get to a specific range and return illegal instruction exception even
2907 * in VS-mode.
2908 */
2909 if (xiselect_aia_range(isel)) {
2910 return rmw_xireg_aia(env, csrno, isel, val, new_val, wr_mask);
2911 } else if (riscv_cpu_cfg(env)->ext_smcsrind ||
2912 riscv_cpu_cfg(env)->ext_sscsrind) {
2913 return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
2914 }
2915
2916 done:
2917 return RISCV_EXCP_ILLEGAL_INST;
2918 }
2919
/*
 * Read-modify-write mtopei/stopei/vstopei by forwarding to the machine's
 * IMSIC emulation.  Fails with an illegal- or virtual-instruction
 * exception when no IMSIC is present or the selected guest interrupt
 * file is invalid.
 */
static RISCVException rmw_xtopei(CPURISCVState *env, int csrno,
                                 target_ulong *val, target_ulong new_val,
                                 target_ulong wr_mask)
{
    bool virt;
    int ret = -EINVAL;
    target_ulong priv, vgein;

    /* Translate CSR number for VS-mode */
    csrno = aia_xlate_vs_csrno(env, csrno);

    /* Decode register details from CSR number */
    virt = false;
    switch (csrno) {
    case CSR_MTOPEI:
        priv = PRV_M;
        break;
    case CSR_STOPEI:
        /* With SEIP delegated via mvien, S-mode may not access stopei. */
        if (env->mvien & MIP_SEIP && env->priv == PRV_S) {
            goto done;
        }
        priv = PRV_S;
        break;
    case CSR_VSTOPEI:
        priv = PRV_S;
        virt = true;
        break;
    default:
        goto done;
    };

    /* IMSIC CSRs only available when machine implements IMSIC. */
    if (!env->aia_ireg_rmw_fn[priv]) {
        goto done;
    }

    /* Find the selected guest interrupt file */
    vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;

    /* Selected guest interrupt file should be valid */
    if (virt && (!vgein || env->geilen < vgein)) {
        goto done;
    }

    /* Call machine specific IMSIC register emulation for TOPEI */
    ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
                    AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein,
                                  riscv_cpu_mxl_bits(env)),
                    val, new_val, wr_mask);

 done:
    if (ret) {
        return (env->virt_enabled && virt) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }
    return RISCV_EXCP_NONE;
}
2977
read_mtvec(CPURISCVState * env,int csrno,target_ulong * val)2978 static RISCVException read_mtvec(CPURISCVState *env, int csrno,
2979 target_ulong *val)
2980 {
2981 *val = env->mtvec;
2982 return RISCV_EXCP_NONE;
2983 }
2984
write_mtvec(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)2985 static RISCVException write_mtvec(CPURISCVState *env, int csrno,
2986 target_ulong val, uintptr_t ra)
2987 {
2988 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
2989 if ((val & 3) < 2) {
2990 env->mtvec = val;
2991 } else {
2992 qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n");
2993 }
2994 return RISCV_EXCP_NONE;
2995 }
2996
read_mcountinhibit(CPURISCVState * env,int csrno,target_ulong * val)2997 static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
2998 target_ulong *val)
2999 {
3000 *val = env->mcountinhibit;
3001 return RISCV_EXCP_NONE;
3002 }
3003
/*
 * Write mcountinhibit.  For every counter whose inhibit bit toggles, the
 * snapshot/accumulator state is updated so counting resumes or pauses at
 * the correct value: resuming re-snapshots the host baseline (and re-arms
 * the overflow timer for hpm counters), pausing folds the elapsed delta
 * into the guest-visible counter value.
 */
static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
                                          target_ulong val, uintptr_t ra)
{
    int cidx;
    PMUCTRState *counter;
    RISCVCPU *cpu = env_archcpu(env);
    uint32_t present_ctrs = cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR;
    target_ulong updated_ctrs = (env->mcountinhibit ^ val) & present_ctrs;
    uint64_t mhpmctr_val, prev_count, curr_count;

    /* WARL register - disable unavailable counters; TM bit is always 0 */
    env->mcountinhibit = val & present_ctrs;

    /* Check if any other counter is also monitoring cycles/instructions */
    for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
        /* Only counters whose inhibit state actually changed need work. */
        if (!(updated_ctrs & BIT(cidx)) ||
            (!riscv_pmu_ctr_monitor_cycles(env, cidx) &&
             !riscv_pmu_ctr_monitor_instructions(env, cidx))) {
            continue;
        }

        counter = &env->pmu_ctrs[cidx];

        if (!get_field(env->mcountinhibit, BIT(cidx))) {
            /* Counter was just un-inhibited: snapshot the host baseline. */
            counter->mhpmcounter_prev =
                riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                counter->mhpmcounterh_prev =
                    riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);
            }

            /* Indices above 2 are programmable hpm counters. */
            if (cidx > 2) {
                mhpmctr_val = counter->mhpmcounter_val;
                if (riscv_cpu_mxl(env) == MXL_RV32) {
                    mhpmctr_val = mhpmctr_val |
                            ((uint64_t)counter->mhpmcounterh_val << 32);
                }
                riscv_pmu_setup_timer(env, mhpmctr_val, cidx);
            }
        } else {
            /* Counter was just inhibited: fold the elapsed delta in. */
            curr_count = riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);

            mhpmctr_val = counter->mhpmcounter_val;
            prev_count = counter->mhpmcounter_prev;
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                uint64_t tmp =
                    riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);

                curr_count = curr_count | (tmp << 32);
                mhpmctr_val = mhpmctr_val |
                        ((uint64_t)counter->mhpmcounterh_val << 32);
                prev_count = prev_count |
                        ((uint64_t)counter->mhpmcounterh_prev << 32);
            }

            /* Adjust the counter for later reads. */
            mhpmctr_val = curr_count - prev_count + mhpmctr_val;
            counter->mhpmcounter_val = mhpmctr_val;
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                counter->mhpmcounterh_val = mhpmctr_val >> 32;
            }
        }
    }

    return RISCV_EXCP_NONE;
}
3070
read_scountinhibit(CPURISCVState * env,int csrno,target_ulong * val)3071 static RISCVException read_scountinhibit(CPURISCVState *env, int csrno,
3072 target_ulong *val)
3073 {
3074 /* S-mode can only access the bits delegated by M-mode */
3075 *val = env->mcountinhibit & env->mcounteren;
3076 return RISCV_EXCP_NONE;
3077 }
3078
write_scountinhibit(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3079 static RISCVException write_scountinhibit(CPURISCVState *env, int csrno,
3080 target_ulong val, uintptr_t ra)
3081 {
3082 return write_mcountinhibit(env, csrno, val & env->mcounteren, ra);
3083 }
3084
read_mcounteren(CPURISCVState * env,int csrno,target_ulong * val)3085 static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
3086 target_ulong *val)
3087 {
3088 *val = env->mcounteren;
3089 return RISCV_EXCP_NONE;
3090 }
3091
write_mcounteren(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3092 static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
3093 target_ulong val, uintptr_t ra)
3094 {
3095 RISCVCPU *cpu = env_archcpu(env);
3096
3097 /* WARL register - disable unavailable counters */
3098 env->mcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
3099 COUNTEREN_IR);
3100 return RISCV_EXCP_NONE;
3101 }
3102
3103 /* Machine Trap Handling */
read_mscratch_i128(CPURISCVState * env,int csrno,Int128 * val)3104 static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
3105 Int128 *val)
3106 {
3107 *val = int128_make128(env->mscratch, env->mscratchh);
3108 return RISCV_EXCP_NONE;
3109 }
3110
write_mscratch_i128(CPURISCVState * env,int csrno,Int128 val)3111 static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
3112 Int128 val)
3113 {
3114 env->mscratch = int128_getlo(val);
3115 env->mscratchh = int128_gethi(val);
3116 return RISCV_EXCP_NONE;
3117 }
3118
read_mscratch(CPURISCVState * env,int csrno,target_ulong * val)3119 static RISCVException read_mscratch(CPURISCVState *env, int csrno,
3120 target_ulong *val)
3121 {
3122 *val = env->mscratch;
3123 return RISCV_EXCP_NONE;
3124 }
3125
write_mscratch(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3126 static RISCVException write_mscratch(CPURISCVState *env, int csrno,
3127 target_ulong val, uintptr_t ra)
3128 {
3129 env->mscratch = val;
3130 return RISCV_EXCP_NONE;
3131 }
3132
read_mepc(CPURISCVState * env,int csrno,target_ulong * val)3133 static RISCVException read_mepc(CPURISCVState *env, int csrno,
3134 target_ulong *val)
3135 {
3136 *val = env->mepc & get_xepc_mask(env);
3137 return RISCV_EXCP_NONE;
3138 }
3139
write_mepc(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3140 static RISCVException write_mepc(CPURISCVState *env, int csrno,
3141 target_ulong val, uintptr_t ra)
3142 {
3143 env->mepc = val & get_xepc_mask(env);
3144 return RISCV_EXCP_NONE;
3145 }
3146
read_mcause(CPURISCVState * env,int csrno,target_ulong * val)3147 static RISCVException read_mcause(CPURISCVState *env, int csrno,
3148 target_ulong *val)
3149 {
3150 *val = env->mcause;
3151 return RISCV_EXCP_NONE;
3152 }
3153
write_mcause(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3154 static RISCVException write_mcause(CPURISCVState *env, int csrno,
3155 target_ulong val, uintptr_t ra)
3156 {
3157 env->mcause = val;
3158 return RISCV_EXCP_NONE;
3159 }
3160
read_mtval(CPURISCVState * env,int csrno,target_ulong * val)3161 static RISCVException read_mtval(CPURISCVState *env, int csrno,
3162 target_ulong *val)
3163 {
3164 *val = env->mtval;
3165 return RISCV_EXCP_NONE;
3166 }
3167
write_mtval(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3168 static RISCVException write_mtval(CPURISCVState *env, int csrno,
3169 target_ulong val, uintptr_t ra)
3170 {
3171 env->mtval = val;
3172 return RISCV_EXCP_NONE;
3173 }
3174
3175 /* Execution environment configuration setup */
read_menvcfg(CPURISCVState * env,int csrno,target_ulong * val)3176 static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
3177 target_ulong *val)
3178 {
3179 *val = env->menvcfg;
3180 return RISCV_EXCP_NONE;
3181 }
3182
3183 static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
3184 target_ulong val, uintptr_t ra);
write_menvcfg(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3185 static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
3186 target_ulong val, uintptr_t ra)
3187 {
3188 const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
3189 uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE |
3190 MENVCFG_CBZE | MENVCFG_CDE;
3191 bool stce_changed = false;
3192
3193 if (riscv_cpu_mxl(env) == MXL_RV64) {
3194 mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
3195 (cfg->ext_sstc ? MENVCFG_STCE : 0) |
3196 (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) |
3197 (cfg->ext_svadu ? MENVCFG_ADUE : 0) |
3198 (cfg->ext_ssdbltrp ? MENVCFG_DTE : 0);
3199
3200 if (env_archcpu(env)->cfg.ext_zicfilp) {
3201 mask |= MENVCFG_LPE;
3202 }
3203
3204 if (env_archcpu(env)->cfg.ext_zicfiss) {
3205 mask |= MENVCFG_SSE;
3206 }
3207
3208 /* Update PMM field only if the value is valid according to Zjpm v1.0 */
3209 if (env_archcpu(env)->cfg.ext_smnpm &&
3210 get_field(val, MENVCFG_PMM) != PMM_FIELD_RESERVED) {
3211 mask |= MENVCFG_PMM;
3212 }
3213
3214 if ((val & MENVCFG_DTE) == 0) {
3215 env->mstatus &= ~MSTATUS_SDT;
3216 }
3217
3218 if (cfg->ext_sstc &&
3219 ((env->menvcfg & MENVCFG_STCE) != (val & MENVCFG_STCE))) {
3220 stce_changed = true;
3221 }
3222 }
3223 env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
3224
3225 if (stce_changed) {
3226 riscv_timer_stce_changed(env, true, !!(val & MENVCFG_STCE));
3227 }
3228
3229 return write_henvcfg(env, CSR_HENVCFG, env->henvcfg, ra);
3230 }
3231
read_menvcfgh(CPURISCVState * env,int csrno,target_ulong * val)3232 static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
3233 target_ulong *val)
3234 {
3235 *val = env->menvcfg >> 32;
3236 return RISCV_EXCP_NONE;
3237 }
3238
3239 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
3240 target_ulong val, uintptr_t ra);
/*
 * Write the RV32 high half of menvcfg.  All the conditionally-writable
 * bits (PBMTE/STCE/ADUE/CDE/DTE) live in the upper 32 bits, so the raw
 * value is shifted up before being merged under the extension mask.
 */
static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
    /* Each bit is writable only when the corresponding extension exists. */
    uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
                    (cfg->ext_sstc ? MENVCFG_STCE : 0) |
                    (cfg->ext_svadu ? MENVCFG_ADUE : 0) |
                    (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) |
                    (cfg->ext_ssdbltrp ? MENVCFG_DTE : 0);
    uint64_t valh = (uint64_t)val << 32;
    bool stce_changed = false;

    /* Track STCE transitions so the sstc timer state can be updated. */
    if (cfg->ext_sstc &&
        ((env->menvcfg & MENVCFG_STCE) != (valh & MENVCFG_STCE))) {
        stce_changed = true;
    }

    /* Clearing double-trap enable also discards any pending mstatus.SDT. */
    if ((valh & MENVCFG_DTE) == 0) {
        env->mstatus &= ~MSTATUS_SDT;
    }

    env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);

    if (stce_changed) {
        riscv_timer_stce_changed(env, true, !!(valh & MENVCFG_STCE));
    }

    /* henvcfg's upper half is gated by menvcfg; re-filter it. */
    return write_henvcfgh(env, CSR_HENVCFGH, env->henvcfg >> 32, ra);
}
3270
read_senvcfg(CPURISCVState * env,int csrno,target_ulong * val)3271 static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
3272 target_ulong *val)
3273 {
3274 RISCVException ret;
3275
3276 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
3277 if (ret != RISCV_EXCP_NONE) {
3278 return ret;
3279 }
3280
3281 *val = env->senvcfg;
3282 return RISCV_EXCP_NONE;
3283 }
3284
/*
 * Write the supervisor environment configuration register.  Builds a
 * writable-bit mask from the enabled extensions, then merges val in.
 */
static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
    RISCVException ret;
    /* Update PMM field only if the value is valid according to Zjpm v1.0 */
    if (env_archcpu(env)->cfg.ext_ssnpm &&
        riscv_cpu_mxl(env) == MXL_RV64 &&
        get_field(val, SENVCFG_PMM) != PMM_FIELD_RESERVED) {
        mask |= SENVCFG_PMM;
    }

    /* Access is gated by mstateen0.HSENVCFG when Smstateen is present. */
    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (env_archcpu(env)->cfg.ext_zicfilp) {
        mask |= SENVCFG_LPE;
    }

    /* Higher mode SSE must be ON for next-less mode SSE to be ON */
    if (env_archcpu(env)->cfg.ext_zicfiss &&
        get_field(env->menvcfg, MENVCFG_SSE) &&
        (env->virt_enabled ? get_field(env->henvcfg, HENVCFG_SSE) : true)) {
        mask |= SENVCFG_SSE;
    }

    if (env_archcpu(env)->cfg.ext_svukte) {
        mask |= SENVCFG_UKTE;
    }

    /* Bits outside the mask keep their current value (WARL). */
    env->senvcfg = (env->senvcfg & ~mask) | (val & mask);
    return RISCV_EXCP_NONE;
}
3320
read_henvcfg(CPURISCVState * env,int csrno,target_ulong * val)3321 static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
3322 target_ulong *val)
3323 {
3324 RISCVException ret;
3325
3326 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
3327 if (ret != RISCV_EXCP_NONE) {
3328 return ret;
3329 }
3330
3331 /*
3332 * henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0
3333 * henvcfg.stce is read_only 0 when menvcfg.stce = 0
3334 * henvcfg.adue is read_only 0 when menvcfg.adue = 0
3335 * henvcfg.dte is read_only 0 when menvcfg.dte = 0
3336 */
3337 *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
3338 HENVCFG_DTE) | env->menvcfg);
3339 return RISCV_EXCP_NONE;
3340 }
3341
/*
 * Write the hypervisor environment configuration register.  The
 * conditionally-writable bits are gated twice: by menvcfg (a bit must be
 * enabled at M level before HS can pass it on) and by extension presence.
 */
static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
    uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;
    RISCVException ret;
    bool stce_changed = false;

    /* Access is gated by mstateen0.HSENVCFG when Smstateen is present. */
    ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (riscv_cpu_mxl(env) == MXL_RV64) {
        /* These bits are only writable if menvcfg has them enabled. */
        mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
                                HENVCFG_DTE);

        if (env_archcpu(env)->cfg.ext_zicfilp) {
            mask |= HENVCFG_LPE;
        }

        /* H can light up SSE for VS only if HS had it from menvcfg */
        if (env_archcpu(env)->cfg.ext_zicfiss &&
            get_field(env->menvcfg, MENVCFG_SSE)) {
            mask |= HENVCFG_SSE;
        }

        /* Update PMM field only if the value is valid according to Zjpm v1.0 */
        if (env_archcpu(env)->cfg.ext_ssnpm &&
            get_field(val, HENVCFG_PMM) != PMM_FIELD_RESERVED) {
            mask |= HENVCFG_PMM;
        }

        /* Track STCE transitions so the VS timer state can be updated. */
        if (cfg->ext_sstc &&
            ((env->henvcfg & HENVCFG_STCE) != (val & HENVCFG_STCE))) {
            stce_changed = true;
        }
    }

    env->henvcfg = val & mask;
    /* Clearing double-trap enable discards any pending vsstatus.SDT. */
    if ((env->henvcfg & HENVCFG_DTE) == 0) {
        env->vsstatus &= ~MSTATUS_SDT;
    }

    if (stce_changed) {
        riscv_timer_stce_changed(env, false, !!(val & HENVCFG_STCE));
    }

    return RISCV_EXCP_NONE;
}
3392
read_henvcfgh(CPURISCVState * env,int csrno,target_ulong * val)3393 static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
3394 target_ulong *val)
3395 {
3396 RISCVException ret;
3397
3398 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
3399 if (ret != RISCV_EXCP_NONE) {
3400 return ret;
3401 }
3402
3403 *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
3404 HENVCFG_DTE) | env->menvcfg)) >> 32;
3405 return RISCV_EXCP_NONE;
3406 }
3407
write_henvcfgh(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3408 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
3409 target_ulong val, uintptr_t ra)
3410 {
3411 const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
3412 uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE |
3413 HENVCFG_ADUE | HENVCFG_DTE);
3414 uint64_t valh = (uint64_t)val << 32;
3415 RISCVException ret;
3416 bool stce_changed = false;
3417
3418 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
3419 if (ret != RISCV_EXCP_NONE) {
3420 return ret;
3421 }
3422
3423 if (cfg->ext_sstc &&
3424 ((env->henvcfg & HENVCFG_STCE) != (valh & HENVCFG_STCE))) {
3425 stce_changed = true;
3426 }
3427
3428 env->henvcfg = (env->henvcfg & 0xFFFFFFFF) | (valh & mask);
3429 if ((env->henvcfg & HENVCFG_DTE) == 0) {
3430 env->vsstatus &= ~MSTATUS_SDT;
3431 }
3432
3433 if (stce_changed) {
3434 riscv_timer_stce_changed(env, false, !!(val & HENVCFG_STCE));
3435 }
3436
3437 return RISCV_EXCP_NONE;
3438 }
3439
read_mstateen(CPURISCVState * env,int csrno,target_ulong * val)3440 static RISCVException read_mstateen(CPURISCVState *env, int csrno,
3441 target_ulong *val)
3442 {
3443 *val = env->mstateen[csrno - CSR_MSTATEEN0];
3444
3445 return RISCV_EXCP_NONE;
3446 }
3447
write_mstateen(CPURISCVState * env,int csrno,uint64_t wr_mask,target_ulong new_val)3448 static RISCVException write_mstateen(CPURISCVState *env, int csrno,
3449 uint64_t wr_mask, target_ulong new_val)
3450 {
3451 uint64_t *reg;
3452
3453 reg = &env->mstateen[csrno - CSR_MSTATEEN0];
3454 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
3455
3456 return RISCV_EXCP_NONE;
3457 }
3458
write_mstateen0(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3459 static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
3460 target_ulong new_val, uintptr_t ra)
3461 {
3462 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3463 if (!riscv_has_ext(env, RVF)) {
3464 wr_mask |= SMSTATEEN0_FCSR;
3465 }
3466
3467 if (env->priv_ver >= PRIV_VERSION_1_13_0) {
3468 wr_mask |= SMSTATEEN0_P1P13;
3469 }
3470
3471 if (riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_smcsrind) {
3472 wr_mask |= SMSTATEEN0_SVSLCT;
3473 }
3474
3475 /*
3476 * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is
3477 * implemented. However, that information is with MachineState and we can't
3478 * figure that out in csr.c. Just enable if Smaia is available.
3479 */
3480 if (riscv_cpu_cfg(env)->ext_smaia) {
3481 wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
3482 }
3483
3484 if (riscv_cpu_cfg(env)->ext_ssctr) {
3485 wr_mask |= SMSTATEEN0_CTR;
3486 }
3487
3488 return write_mstateen(env, csrno, wr_mask, new_val);
3489 }
3490
write_mstateen_1_3(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3491 static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno,
3492 target_ulong new_val, uintptr_t ra)
3493 {
3494 return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
3495 }
3496
read_mstateenh(CPURISCVState * env,int csrno,target_ulong * val)3497 static RISCVException read_mstateenh(CPURISCVState *env, int csrno,
3498 target_ulong *val)
3499 {
3500 *val = env->mstateen[csrno - CSR_MSTATEEN0H] >> 32;
3501
3502 return RISCV_EXCP_NONE;
3503 }
3504
write_mstateenh(CPURISCVState * env,int csrno,uint64_t wr_mask,target_ulong new_val)3505 static RISCVException write_mstateenh(CPURISCVState *env, int csrno,
3506 uint64_t wr_mask, target_ulong new_val)
3507 {
3508 uint64_t *reg, val;
3509
3510 reg = &env->mstateen[csrno - CSR_MSTATEEN0H];
3511 val = (uint64_t)new_val << 32;
3512 val |= *reg & 0xFFFFFFFF;
3513 *reg = (*reg & ~wr_mask) | (val & wr_mask);
3514
3515 return RISCV_EXCP_NONE;
3516 }
3517
write_mstateen0h(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3518 static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
3519 target_ulong new_val, uintptr_t ra)
3520 {
3521 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3522
3523 if (env->priv_ver >= PRIV_VERSION_1_13_0) {
3524 wr_mask |= SMSTATEEN0_P1P13;
3525 }
3526
3527 if (riscv_cpu_cfg(env)->ext_ssctr) {
3528 wr_mask |= SMSTATEEN0_CTR;
3529 }
3530
3531 return write_mstateenh(env, csrno, wr_mask, new_val);
3532 }
3533
write_mstateenh_1_3(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3534 static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno,
3535 target_ulong new_val, uintptr_t ra)
3536 {
3537 return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
3538 }
3539
read_hstateen(CPURISCVState * env,int csrno,target_ulong * val)3540 static RISCVException read_hstateen(CPURISCVState *env, int csrno,
3541 target_ulong *val)
3542 {
3543 int index = csrno - CSR_HSTATEEN0;
3544
3545 *val = env->hstateen[index] & env->mstateen[index];
3546
3547 return RISCV_EXCP_NONE;
3548 }
3549
write_hstateen(CPURISCVState * env,int csrno,uint64_t mask,target_ulong new_val)3550 static RISCVException write_hstateen(CPURISCVState *env, int csrno,
3551 uint64_t mask, target_ulong new_val)
3552 {
3553 int index = csrno - CSR_HSTATEEN0;
3554 uint64_t *reg, wr_mask;
3555
3556 reg = &env->hstateen[index];
3557 wr_mask = env->mstateen[index] & mask;
3558 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
3559
3560 return RISCV_EXCP_NONE;
3561 }
3562
write_hstateen0(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3563 static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
3564 target_ulong new_val, uintptr_t ra)
3565 {
3566 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3567
3568 if (!riscv_has_ext(env, RVF)) {
3569 wr_mask |= SMSTATEEN0_FCSR;
3570 }
3571
3572 if (riscv_cpu_cfg(env)->ext_ssaia || riscv_cpu_cfg(env)->ext_sscsrind) {
3573 wr_mask |= SMSTATEEN0_SVSLCT;
3574 }
3575
3576 /*
3577 * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is
3578 * implemented. However, that information is with MachineState and we can't
3579 * figure that out in csr.c. Just enable if Ssaia is available.
3580 */
3581 if (riscv_cpu_cfg(env)->ext_ssaia) {
3582 wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
3583 }
3584
3585 if (riscv_cpu_cfg(env)->ext_ssctr) {
3586 wr_mask |= SMSTATEEN0_CTR;
3587 }
3588
3589 return write_hstateen(env, csrno, wr_mask, new_val);
3590 }
3591
write_hstateen_1_3(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3592 static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno,
3593 target_ulong new_val, uintptr_t ra)
3594 {
3595 return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
3596 }
3597
read_hstateenh(CPURISCVState * env,int csrno,target_ulong * val)3598 static RISCVException read_hstateenh(CPURISCVState *env, int csrno,
3599 target_ulong *val)
3600 {
3601 int index = csrno - CSR_HSTATEEN0H;
3602
3603 *val = (env->hstateen[index] >> 32) & (env->mstateen[index] >> 32);
3604
3605 return RISCV_EXCP_NONE;
3606 }
3607
write_hstateenh(CPURISCVState * env,int csrno,uint64_t mask,target_ulong new_val)3608 static RISCVException write_hstateenh(CPURISCVState *env, int csrno,
3609 uint64_t mask, target_ulong new_val)
3610 {
3611 int index = csrno - CSR_HSTATEEN0H;
3612 uint64_t *reg, wr_mask, val;
3613
3614 reg = &env->hstateen[index];
3615 val = (uint64_t)new_val << 32;
3616 val |= *reg & 0xFFFFFFFF;
3617 wr_mask = env->mstateen[index] & mask;
3618 *reg = (*reg & ~wr_mask) | (val & wr_mask);
3619
3620 return RISCV_EXCP_NONE;
3621 }
3622
write_hstateen0h(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3623 static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
3624 target_ulong new_val, uintptr_t ra)
3625 {
3626 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3627
3628 if (riscv_cpu_cfg(env)->ext_ssctr) {
3629 wr_mask |= SMSTATEEN0_CTR;
3630 }
3631
3632 return write_hstateenh(env, csrno, wr_mask, new_val);
3633 }
3634
write_hstateenh_1_3(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3635 static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno,
3636 target_ulong new_val, uintptr_t ra)
3637 {
3638 return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
3639 }
3640
read_sstateen(CPURISCVState * env,int csrno,target_ulong * val)3641 static RISCVException read_sstateen(CPURISCVState *env, int csrno,
3642 target_ulong *val)
3643 {
3644 bool virt = env->virt_enabled;
3645 int index = csrno - CSR_SSTATEEN0;
3646
3647 *val = env->sstateen[index] & env->mstateen[index];
3648 if (virt) {
3649 *val &= env->hstateen[index];
3650 }
3651
3652 return RISCV_EXCP_NONE;
3653 }
3654
write_sstateen(CPURISCVState * env,int csrno,uint64_t mask,target_ulong new_val)3655 static RISCVException write_sstateen(CPURISCVState *env, int csrno,
3656 uint64_t mask, target_ulong new_val)
3657 {
3658 bool virt = env->virt_enabled;
3659 int index = csrno - CSR_SSTATEEN0;
3660 uint64_t wr_mask;
3661 uint64_t *reg;
3662
3663 wr_mask = env->mstateen[index] & mask;
3664 if (virt) {
3665 wr_mask &= env->hstateen[index];
3666 }
3667
3668 reg = &env->sstateen[index];
3669 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
3670
3671 return RISCV_EXCP_NONE;
3672 }
3673
write_sstateen0(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3674 static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
3675 target_ulong new_val, uintptr_t ra)
3676 {
3677 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
3678
3679 if (!riscv_has_ext(env, RVF)) {
3680 wr_mask |= SMSTATEEN0_FCSR;
3681 }
3682
3683 return write_sstateen(env, csrno, wr_mask, new_val);
3684 }
3685
write_sstateen_1_3(CPURISCVState * env,int csrno,target_ulong new_val,uintptr_t ra)3686 static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno,
3687 target_ulong new_val, uintptr_t ra)
3688 {
3689 return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
3690 }
3691
/*
 * 64-bit read-modify-write of mip.  Also backs the hvip accessor (csrno
 * distinguishes the two).  Only interrupts in delegable_ints may be
 * written; the rest of mip is controlled by hardware.
 */
static RISCVException rmw_mip64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    uint64_t old_mip, mask = wr_mask & delegable_ints;
    uint32_t gin;

    if (mask & MIP_SEIP) {
        /*
         * SEIP is the OR of a software-injected bit and the external
         * controller's bit; remember the software part and re-add the
         * external part so a write cannot clear a hardware-driven SEIP.
         */
        env->software_seip = new_val & MIP_SEIP;
        new_val |= env->external_seip * MIP_SEIP;
    }

    if (riscv_cpu_cfg(env)->ext_sstc && (env->priv == PRV_M) &&
        get_field(env->menvcfg, MENVCFG_STCE)) {
        /* sstc extension forbids STIP & VSTIP to be writeable in mip */

        /* STIP is not writable when menvcfg.STCE is enabled. */
        mask = mask & ~MIP_STIP;

        /* VSTIP is not writable when both [mh]envcfg.STCE are enabled. */
        if (get_field(env->henvcfg, HENVCFG_STCE)) {
            mask = mask & ~MIP_VSTIP;
        }
    }

    if (mask) {
        old_mip = riscv_cpu_update_mip(env, mask, (new_val & mask));
    } else {
        old_mip = env->mip;
    }

    if (csrno != CSR_HVIP) {
        /* Fold in VSEIP from the selected guest external interrupt and
         * VSTIP from the sstc virtual timer; both live outside mip. */
        gin = get_field(env->hstatus, HSTATUS_VGEIN);
        old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0;
        old_mip |= env->vstime_irq ? MIP_VSTIP : 0;
    }

    if (ret_val) {
        *ret_val = old_mip;
    }

    return RISCV_EXCP_NONE;
}
3735
rmw_mip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3736 static RISCVException rmw_mip(CPURISCVState *env, int csrno,
3737 target_ulong *ret_val,
3738 target_ulong new_val, target_ulong wr_mask)
3739 {
3740 uint64_t rval;
3741 RISCVException ret;
3742
3743 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask);
3744 if (ret_val) {
3745 *ret_val = rval;
3746 }
3747
3748 return ret;
3749 }
3750
rmw_miph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3751 static RISCVException rmw_miph(CPURISCVState *env, int csrno,
3752 target_ulong *ret_val,
3753 target_ulong new_val, target_ulong wr_mask)
3754 {
3755 uint64_t rval;
3756 RISCVException ret;
3757
3758 ret = rmw_mip64(env, csrno, &rval,
3759 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3760 if (ret_val) {
3761 *ret_val = rval >> 32;
3762 }
3763
3764 return ret;
3765 }
3766
3767 /*
3768 * The function is written for two use-cases:
3769 * 1- To access mvip csr as is for m-mode access.
3770 * 2- To access sip as a combination of mip and mvip for s-mode.
3771 *
3772 * Both report bits 1, 5, 9 and 13:63 but with the exception of
3773 * STIP being read-only zero in case of mvip when sstc extension
3774 * is present.
3775 * Also, sip needs to be read-only zero when both mideleg[i] and
3776 * mvien[i] are zero but mvip needs to be an alias of mip.
3777 */
/*
 * 64-bit read-modify-write backing both mvip (M-mode) and sip (S-mode);
 * csrno selects which view.  See the block comment above for the
 * aliasing rules between mip and mvip.
 */
static RISCVException rmw_mvip64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    RISCVCPU *cpu = env_archcpu(env);
    target_ulong ret_mip = 0;
    RISCVException ret;
    uint64_t old_mvip;

    /*
     * mideleg[i]  mvien[i]
     *   0           0      No delegation. mvip[i] is alias of mip[i].
     *   0           1      mvip[i] becomes source of interrupt, mip bypassed.
     *   1           X      mip[i] is source of interrupt and mvip[i] aliases
     *                      mip[i].
     *
     *   So alias condition would be for bits:
     *      ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (mideleg | ~mvien)) |
     *          (!sstc & MIP_STIP)
     *
     *   Non-alias condition will be for bits:
     *      (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (~mideleg & mvien)
     *
     *  alias_mask denotes the bits that come from mip nalias_mask denotes bits
     *  that come from hvip.
     */
    uint64_t alias_mask = ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
        (env->mideleg | ~env->mvien)) | MIP_STIP;
    uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
        (~env->mideleg & env->mvien);
    uint64_t wr_mask_mvip;
    uint64_t wr_mask_mip;

    /*
     * mideleg[i]  mvien[i]
     *   0           0      sip[i] read-only zero.
     *   0           1      sip[i] alias of mvip[i].
     *   1           X      sip[i] alias of mip[i].
     *
     *  Both alias and non-alias mask remain same for sip except for bits
     *  which are zero in both mideleg and mvien.
     */
    if (csrno == CSR_SIP) {
        /* Remove bits that are zero in both mideleg and mvien. */
        alias_mask &= (env->mideleg | env->mvien);
        nalias_mask &= (env->mideleg | env->mvien);
    }

    /*
     * If sstc is present, mvip.STIP is not an alias of mip.STIP so clear
     * that bit in the value returned from mip.
     */
    if (cpu->cfg.ext_sstc && (env->priv == PRV_M) &&
        get_field(env->menvcfg, MENVCFG_STCE)) {
        alias_mask &= ~MIP_STIP;
    }

    wr_mask_mip = wr_mask & alias_mask & mvip_writable_mask;
    wr_mask_mvip = wr_mask & nalias_mask & mvip_writable_mask;

    /*
     * For bits set in alias_mask, mvip needs to be alias of mip, so forward
     * this to rmw_mip.
     */
    ret = rmw_mip(env, CSR_MIP, &ret_mip, new_val, wr_mask_mip);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    old_mvip = env->mvip;

    /*
     * Write to mvip. Update only non-alias bits. Alias bits were updated
     * in mip in rmw_mip above.
     */
    if (wr_mask_mvip) {
        env->mvip = (env->mvip & ~wr_mask_mvip) | (new_val & wr_mask_mvip);

        /*
         * Given mvip is separate source from mip, we need to trigger interrupt
         * from here separately. Normally this happen from riscv_cpu_update_mip.
         */
        riscv_cpu_interrupt(env);
    }

    if (ret_val) {
        /* Combine the aliased mip bits with mvip's own bits. */
        ret_mip &= alias_mask;
        old_mvip &= nalias_mask;

        *ret_val = old_mvip | ret_mip;
    }

    return RISCV_EXCP_NONE;
}
3872
rmw_mvip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3873 static RISCVException rmw_mvip(CPURISCVState *env, int csrno,
3874 target_ulong *ret_val,
3875 target_ulong new_val, target_ulong wr_mask)
3876 {
3877 uint64_t rval;
3878 RISCVException ret;
3879
3880 ret = rmw_mvip64(env, csrno, &rval, new_val, wr_mask);
3881 if (ret_val) {
3882 *ret_val = rval;
3883 }
3884
3885 return ret;
3886 }
3887
rmw_mviph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)3888 static RISCVException rmw_mviph(CPURISCVState *env, int csrno,
3889 target_ulong *ret_val,
3890 target_ulong new_val, target_ulong wr_mask)
3891 {
3892 uint64_t rval;
3893 RISCVException ret;
3894
3895 ret = rmw_mvip64(env, csrno, &rval,
3896 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
3897 if (ret_val) {
3898 *ret_val = rval >> 32;
3899 }
3900
3901 return ret;
3902 }
3903
3904 /* Supervisor Trap Setup */
read_sstatus_i128(CPURISCVState * env,int csrno,Int128 * val)3905 static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
3906 Int128 *val)
3907 {
3908 uint64_t mask = sstatus_v1_10_mask;
3909 uint64_t sstatus = env->mstatus & mask;
3910 if (env->xl != MXL_RV32 || env->debugger) {
3911 mask |= SSTATUS64_UXL;
3912 }
3913 if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
3914 mask |= SSTATUS_SDT;
3915 }
3916
3917 if (env_archcpu(env)->cfg.ext_zicfilp) {
3918 mask |= SSTATUS_SPELP;
3919 }
3920
3921 *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
3922 return RISCV_EXCP_NONE;
3923 }
3924
read_sstatus(CPURISCVState * env,int csrno,target_ulong * val)3925 static RISCVException read_sstatus(CPURISCVState *env, int csrno,
3926 target_ulong *val)
3927 {
3928 target_ulong mask = (sstatus_v1_10_mask);
3929 if (env->xl != MXL_RV32 || env->debugger) {
3930 mask |= SSTATUS64_UXL;
3931 }
3932
3933 if (env_archcpu(env)->cfg.ext_zicfilp) {
3934 mask |= SSTATUS_SPELP;
3935 }
3936 if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
3937 mask |= SSTATUS_SDT;
3938 }
3939 /* TODO: Use SXL not MXL. */
3940 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
3941 return RISCV_EXCP_NONE;
3942 }
3943
write_sstatus(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)3944 static RISCVException write_sstatus(CPURISCVState *env, int csrno,
3945 target_ulong val, uintptr_t ra)
3946 {
3947 target_ulong mask = (sstatus_v1_10_mask);
3948
3949 if (env->xl != MXL_RV32 || env->debugger) {
3950 if ((val & SSTATUS64_UXL) != 0) {
3951 mask |= SSTATUS64_UXL;
3952 }
3953 }
3954
3955 if (env_archcpu(env)->cfg.ext_zicfilp) {
3956 mask |= SSTATUS_SPELP;
3957 }
3958 if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
3959 mask |= SSTATUS_SDT;
3960 }
3961 target_ulong newval = (env->mstatus & ~mask) | (val & mask);
3962 return write_mstatus(env, CSR_MSTATUS, newval, ra);
3963 }
3964
/*
 * 64-bit read-modify-write of vsie.  In the vsie view the VS-level
 * interrupt bits appear shifted down by one position relative to their
 * location in mie/hideleg, hence the vsbits shuffling below.  Bits
 * delegated via hideleg alias mie; bits enabled only via hvien are
 * stored separately in env->vsie.
 */
static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    uint64_t alias_mask = (LOCAL_INTERRUPTS | VS_MODE_INTERRUPTS) &
                            env->hideleg;
    uint64_t nalias_mask = LOCAL_INTERRUPTS & (~env->hideleg & env->hvien);
    uint64_t rval, rval_vs, vsbits;
    uint64_t wr_mask_vsie;
    uint64_t wr_mask_mie;
    RISCVException ret;

    /* Bring VS-level bits to correct position */
    vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
    new_val &= ~(VS_MODE_INTERRUPTS >> 1);
    new_val |= vsbits << 1;

    vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
    wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
    wr_mask |= vsbits << 1;

    wr_mask_mie = wr_mask & alias_mask;
    wr_mask_vsie = wr_mask & nalias_mask;

    /* Aliased bits go through mie; non-aliased bits live in env->vsie. */
    ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask_mie);

    rval_vs = env->vsie & nalias_mask;
    env->vsie = (env->vsie & ~wr_mask_vsie) | (new_val & wr_mask_vsie);

    if (ret_val) {
        /* Shift VS-level bits back down into the vsie view. */
        rval &= alias_mask;
        vsbits = rval & VS_MODE_INTERRUPTS;
        rval &= ~VS_MODE_INTERRUPTS;
        *ret_val = rval | (vsbits >> 1) | rval_vs;
    }

    return ret;
}
4003
rmw_vsie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4004 static RISCVException rmw_vsie(CPURISCVState *env, int csrno,
4005 target_ulong *ret_val,
4006 target_ulong new_val, target_ulong wr_mask)
4007 {
4008 uint64_t rval;
4009 RISCVException ret;
4010
4011 ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask);
4012 if (ret_val) {
4013 *ret_val = rval;
4014 }
4015
4016 return ret;
4017 }
4018
rmw_vsieh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4019 static RISCVException rmw_vsieh(CPURISCVState *env, int csrno,
4020 target_ulong *ret_val,
4021 target_ulong new_val, target_ulong wr_mask)
4022 {
4023 uint64_t rval;
4024 RISCVException ret;
4025
4026 ret = rmw_vsie64(env, csrno, &rval,
4027 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4028 if (ret_val) {
4029 *ret_val = rval >> 32;
4030 }
4031
4032 return ret;
4033 }
4034
/*
 * Read-modify-write the 64-bit view of sie.
 *
 * When V=1 this redirects to vsie (sie is substituted by vsie for the
 * guest), raising a virtual-instruction fault if hvictl.VTI forbids it.
 * Otherwise delegated bits alias mie and mvien-only bits are kept in
 * env->sie.
 */
static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    /* Bits writable in env->sie (mvien set, not delegated via mideleg). */
    uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
        (~env->mideleg & env->mvien);
    /* Bits that alias mie (delegated via mideleg). */
    uint64_t alias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & env->mideleg;
    uint64_t sie_mask = wr_mask & nalias_mask;
    RISCVException ret;

    /*
     * mideleg[i]  mvien[i]
     *   0           0      sie[i] read-only zero.
     *   0           1      sie[i] is a separate writable bit.
     *   1           X      sie[i] alias of mie[i].
     *
     * Both alias and non-alias mask remain same for sip except for bits
     * which are zero in both mideleg and mvien.
     */
    if (env->virt_enabled) {
        if (env->hvictl & HVICTL_VTI) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        /* With V=1, sie accesses operate on vsie instead. */
        ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
        if (ret_val) {
            *ret_val &= alias_mask;
        }
    } else {
        ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & alias_mask);
        if (ret_val) {
            /* Merge aliased mie bits with the separately-kept sie bits. */
            *ret_val &= alias_mask;
            *ret_val |= env->sie & nalias_mask;
        }

        env->sie = (env->sie & ~sie_mask) | (new_val & sie_mask);
    }

    return ret;
}
4074
rmw_sie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4075 static RISCVException rmw_sie(CPURISCVState *env, int csrno,
4076 target_ulong *ret_val,
4077 target_ulong new_val, target_ulong wr_mask)
4078 {
4079 uint64_t rval;
4080 RISCVException ret;
4081
4082 ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask);
4083 if (ret == RISCV_EXCP_NONE && ret_val) {
4084 *ret_val = rval;
4085 }
4086
4087 return ret;
4088 }
4089
rmw_sieh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4090 static RISCVException rmw_sieh(CPURISCVState *env, int csrno,
4091 target_ulong *ret_val,
4092 target_ulong new_val, target_ulong wr_mask)
4093 {
4094 uint64_t rval;
4095 RISCVException ret;
4096
4097 ret = rmw_sie64(env, csrno, &rval,
4098 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4099 if (ret_val) {
4100 *ret_val = rval >> 32;
4101 }
4102
4103 return ret;
4104 }
4105
read_stvec(CPURISCVState * env,int csrno,target_ulong * val)4106 static RISCVException read_stvec(CPURISCVState *env, int csrno,
4107 target_ulong *val)
4108 {
4109 *val = env->stvec;
4110 return RISCV_EXCP_NONE;
4111 }
4112
write_stvec(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4113 static RISCVException write_stvec(CPURISCVState *env, int csrno,
4114 target_ulong val, uintptr_t ra)
4115 {
4116 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
4117 if ((val & 3) < 2) {
4118 env->stvec = val;
4119 } else {
4120 qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n");
4121 }
4122 return RISCV_EXCP_NONE;
4123 }
4124
read_scounteren(CPURISCVState * env,int csrno,target_ulong * val)4125 static RISCVException read_scounteren(CPURISCVState *env, int csrno,
4126 target_ulong *val)
4127 {
4128 *val = env->scounteren;
4129 return RISCV_EXCP_NONE;
4130 }
4131
write_scounteren(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4132 static RISCVException write_scounteren(CPURISCVState *env, int csrno,
4133 target_ulong val, uintptr_t ra)
4134 {
4135 RISCVCPU *cpu = env_archcpu(env);
4136
4137 /* WARL register - disable unavailable counters */
4138 env->scounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
4139 COUNTEREN_IR);
4140 return RISCV_EXCP_NONE;
4141 }
4142
4143 /* Supervisor Trap Handling */
read_sscratch_i128(CPURISCVState * env,int csrno,Int128 * val)4144 static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
4145 Int128 *val)
4146 {
4147 *val = int128_make128(env->sscratch, env->sscratchh);
4148 return RISCV_EXCP_NONE;
4149 }
4150
write_sscratch_i128(CPURISCVState * env,int csrno,Int128 val)4151 static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
4152 Int128 val)
4153 {
4154 env->sscratch = int128_getlo(val);
4155 env->sscratchh = int128_gethi(val);
4156 return RISCV_EXCP_NONE;
4157 }
4158
read_sscratch(CPURISCVState * env,int csrno,target_ulong * val)4159 static RISCVException read_sscratch(CPURISCVState *env, int csrno,
4160 target_ulong *val)
4161 {
4162 *val = env->sscratch;
4163 return RISCV_EXCP_NONE;
4164 }
4165
write_sscratch(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4166 static RISCVException write_sscratch(CPURISCVState *env, int csrno,
4167 target_ulong val, uintptr_t ra)
4168 {
4169 env->sscratch = val;
4170 return RISCV_EXCP_NONE;
4171 }
4172
read_sepc(CPURISCVState * env,int csrno,target_ulong * val)4173 static RISCVException read_sepc(CPURISCVState *env, int csrno,
4174 target_ulong *val)
4175 {
4176 *val = env->sepc & get_xepc_mask(env);
4177 return RISCV_EXCP_NONE;
4178 }
4179
write_sepc(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4180 static RISCVException write_sepc(CPURISCVState *env, int csrno,
4181 target_ulong val, uintptr_t ra)
4182 {
4183 env->sepc = val & get_xepc_mask(env);
4184 return RISCV_EXCP_NONE;
4185 }
4186
read_scause(CPURISCVState * env,int csrno,target_ulong * val)4187 static RISCVException read_scause(CPURISCVState *env, int csrno,
4188 target_ulong *val)
4189 {
4190 *val = env->scause;
4191 return RISCV_EXCP_NONE;
4192 }
4193
write_scause(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4194 static RISCVException write_scause(CPURISCVState *env, int csrno,
4195 target_ulong val, uintptr_t ra)
4196 {
4197 env->scause = val;
4198 return RISCV_EXCP_NONE;
4199 }
4200
read_stval(CPURISCVState * env,int csrno,target_ulong * val)4201 static RISCVException read_stval(CPURISCVState *env, int csrno,
4202 target_ulong *val)
4203 {
4204 *val = env->stval;
4205 return RISCV_EXCP_NONE;
4206 }
4207
write_stval(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4208 static RISCVException write_stval(CPURISCVState *env, int csrno,
4209 target_ulong val, uintptr_t ra)
4210 {
4211 env->stval = val;
4212 return RISCV_EXCP_NONE;
4213 }
4214
4215 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
4216 uint64_t *ret_val,
4217 uint64_t new_val, uint64_t wr_mask);
4218
/*
 * Read-modify-write the 64-bit view of vsip.
 *
 * All pending state is maintained through rmw_hvip64(); this function
 * only computes the vsip-visible mask and shifts the VS-level bits
 * (which sit one position higher in hip/hvip) into/out of place.
 */
static RISCVException rmw_vsip64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t rval, mask = env->hideleg & VS_MODE_INTERRUPTS;
    uint64_t vsbits;

    /* Add virtualized bits into vsip mask. */
    mask |= env->hvien & ~env->hideleg;

    /* Bring VS-level bits to correct position */
    vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
    new_val &= ~(VS_MODE_INTERRUPTS >> 1);
    new_val |= vsbits << 1;
    vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
    wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
    wr_mask |= vsbits << 1;

    /* Writes are limited to bits both visible in vsip and writable. */
    ret = rmw_hvip64(env, csrno, &rval, new_val,
                     wr_mask & mask & vsip_writable_mask);
    if (ret_val) {
        /* Shift the VS-level bits back down for the vsip view. */
        rval &= mask;
        vsbits = rval & VS_MODE_INTERRUPTS;
        rval &= ~VS_MODE_INTERRUPTS;
        *ret_val = rval | (vsbits >> 1);
    }

    return ret;
}
4249
rmw_vsip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4250 static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
4251 target_ulong *ret_val,
4252 target_ulong new_val, target_ulong wr_mask)
4253 {
4254 uint64_t rval;
4255 RISCVException ret;
4256
4257 ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask);
4258 if (ret_val) {
4259 *ret_val = rval;
4260 }
4261
4262 return ret;
4263 }
4264
rmw_vsiph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4265 static RISCVException rmw_vsiph(CPURISCVState *env, int csrno,
4266 target_ulong *ret_val,
4267 target_ulong new_val, target_ulong wr_mask)
4268 {
4269 uint64_t rval;
4270 RISCVException ret;
4271
4272 ret = rmw_vsip64(env, csrno, &rval,
4273 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4274 if (ret_val) {
4275 *ret_val = rval >> 32;
4276 }
4277
4278 return ret;
4279 }
4280
/*
 * Read-modify-write the 64-bit view of sip.
 *
 * With V=1 the access is redirected to vsip (faulting if hvictl.VTI is
 * set); otherwise pending state is maintained via rmw_mvip64().  The
 * returned value is restricted to bits visible in sip, i.e. those either
 * delegated via mideleg or virtualised via mvien.
 */
static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    /* Bits writable through sip: (delegated | mvien) limited to the WARL set. */
    uint64_t mask = (env->mideleg | env->mvien) & sip_writable_mask;

    if (env->virt_enabled) {
        if (env->hvictl & HVICTL_VTI) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
    } else {
        ret = rmw_mvip64(env, csrno, ret_val, new_val, wr_mask & mask);
    }

    if (ret_val) {
        /* Hide bits that are read-only zero in sip. */
        *ret_val &= (env->mideleg | env->mvien) &
            (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS);
    }

    return ret;
}
4304
rmw_sip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4305 static RISCVException rmw_sip(CPURISCVState *env, int csrno,
4306 target_ulong *ret_val,
4307 target_ulong new_val, target_ulong wr_mask)
4308 {
4309 uint64_t rval;
4310 RISCVException ret;
4311
4312 ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask);
4313 if (ret_val) {
4314 *ret_val = rval;
4315 }
4316
4317 return ret;
4318 }
4319
rmw_siph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4320 static RISCVException rmw_siph(CPURISCVState *env, int csrno,
4321 target_ulong *ret_val,
4322 target_ulong new_val, target_ulong wr_mask)
4323 {
4324 uint64_t rval;
4325 RISCVException ret;
4326
4327 ret = rmw_sip64(env, csrno, &rval,
4328 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4329 if (ret_val) {
4330 *ret_val = rval >> 32;
4331 }
4332
4333 return ret;
4334 }
4335
4336 /* Supervisor Protection and Translation */
read_satp(CPURISCVState * env,int csrno,target_ulong * val)4337 static RISCVException read_satp(CPURISCVState *env, int csrno,
4338 target_ulong *val)
4339 {
4340 if (!riscv_cpu_cfg(env)->mmu) {
4341 *val = 0;
4342 return RISCV_EXCP_NONE;
4343 }
4344 *val = env->satp;
4345 return RISCV_EXCP_NONE;
4346 }
4347
write_satp(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4348 static RISCVException write_satp(CPURISCVState *env, int csrno,
4349 target_ulong val, uintptr_t ra)
4350 {
4351 if (!riscv_cpu_cfg(env)->mmu) {
4352 return RISCV_EXCP_NONE;
4353 }
4354
4355 env->satp = legalize_xatp(env, env->satp, val);
4356 return RISCV_EXCP_NONE;
4357 }
4358
/*
 * Read-modify-write sctrdepth (Smctr/Ssctr control transfer records).
 * The depth field is WARL: out-of-range encodings are clamped, and
 * sctrstatus.WRPTR is re-legalized against the (possibly new) depth.
 */
static RISCVException rmw_sctrdepth(CPURISCVState *env, int csrno,
                                    target_ulong *ret_val,
                                    target_ulong new_val, target_ulong wr_mask)
{
    uint64_t mask = wr_mask & SCTRDEPTH_MASK;

    /* Return the value prior to this update. */
    if (ret_val) {
        *ret_val = env->sctrdepth;
    }

    env->sctrdepth = (env->sctrdepth & ~mask) | (new_val & mask);

    /* Correct depth. */
    if (mask) {
        uint64_t depth = get_field(env->sctrdepth, SCTRDEPTH_MASK);

        if (depth > SCTRDEPTH_MAX) {
            /* WARL: clamp an illegal depth encoding to the maximum. */
            depth = SCTRDEPTH_MAX;
            env->sctrdepth = set_field(env->sctrdepth, SCTRDEPTH_MASK, depth);
        }

        /* Update sctrstatus.WRPTR with a legal value */
        depth = 16ULL << depth;
        env->sctrstatus =
            env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
    }

    return RISCV_EXCP_NONE;
}
4388
/*
 * Read-modify-write sctrstatus.  After the update, WRPTR is forced back
 * into range for the buffer depth currently configured in sctrdepth.
 */
static RISCVException rmw_sctrstatus(CPURISCVState *env, int csrno,
                                     target_ulong *ret_val,
                                     target_ulong new_val, target_ulong wr_mask)
{
    /* Number of record entries implied by sctrdepth (16 << depth-field). */
    uint32_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
    uint32_t mask = wr_mask & SCTRSTATUS_MASK;

    /* Return the value prior to this update. */
    if (ret_val) {
        *ret_val = env->sctrstatus;
    }

    env->sctrstatus = (env->sctrstatus & ~mask) | (new_val & mask);

    /* Update sctrstatus.WRPTR with a legal value */
    env->sctrstatus = env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));

    return RISCV_EXCP_NONE;
}
4407
/*
 * Common read-modify-write handler for mctrctl, sctrctl and vsctrctl.
 * Selects the backing field and legal-bit mask by CSR number and current
 * virtualization state, then applies the masked update.
 */
static RISCVException rmw_xctrctl(CPURISCVState *env, int csrno,
                                  target_ulong *ret_val,
                                  target_ulong new_val, target_ulong wr_mask)
{
    uint64_t csr_mask, mask = wr_mask;
    uint64_t *ctl_ptr = &env->mctrctl;

    if (csrno == CSR_MCTRCTL) {
        csr_mask = MCTRCTL_MASK;
    } else if (csrno == CSR_SCTRCTL && !env->virt_enabled) {
        csr_mask = SCTRCTL_MASK;
    } else {
        /*
         * This is for csrno == CSR_SCTRCTL and env->virt_enabled == true
         * or csrno == CSR_VSCTRCTL.
         */
        csr_mask = VSCTRCTL_MASK;
        ctl_ptr = &env->vsctrctl;
    }

    mask &= csr_mask;

    /* Reads only expose the bits valid for the selected CSR view. */
    if (ret_val) {
        *ret_val = *ctl_ptr & csr_mask;
    }

    *ctl_ptr = (*ctl_ptr & ~mask) | (new_val & mask);

    return RISCV_EXCP_NONE;
}
4438
/*
 * Read vstopi (AIA): report the highest-priority pending-and-enabled
 * VS-level interrupt as {IID, IPRIO}, or zero if none.
 *
 * Candidate sources are collected into siid[]/siprio[]:
 *  - the VS external interrupt, with priority taken from the IMSIC's
 *    TOPEI when a guest interrupt file is selected (hstatus.VGEIN),
 *    otherwise from hvictl when it injects IRQ_S_EXT;
 *  - either the interrupt described by hvictl (VTI set) or the highest
 *    pending VS interrupt with its hviprio priority (VTI clear).
 * The lowest numeric priority wins, then the value is legalized per
 * hvictl.IPRIOM.
 */
static RISCVException read_vstopi(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    int irq, ret;
    target_ulong topei;
    uint64_t vseip, vsgein;
    uint32_t iid, iprio, hviid, hviprio, gein;
    uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS];

    gein = get_field(env->hstatus, HSTATUS_VGEIN);
    hviid = get_field(env->hvictl, HVICTL_IID);
    hviprio = get_field(env->hvictl, HVICTL_IPRIO);

    if (gein) {
        /* VS external interrupt comes from the selected guest IMSIC file. */
        vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
        vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP;
        if (gein <= env->geilen && vseip) {
            siid[scount] = IRQ_S_EXT;
            siprio[scount] = IPRIO_MMAXIPRIO + 1;
            if (env->aia_ireg_rmw_fn[PRV_S]) {
                /*
                 * Call machine specific IMSIC register emulation for
                 * reading TOPEI.
                 */
                ret = env->aia_ireg_rmw_fn[PRV_S](
                        env->aia_ireg_rmw_fn_arg[PRV_S],
                        AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein,
                                      riscv_cpu_mxl_bits(env)),
                        &topei, 0, 0);
                if (!ret && topei) {
                    siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK;
                }
            }
            scount++;
        }
    } else {
        /* No guest file selected: hvictl may inject IRQ_S_EXT directly. */
        if (hviid == IRQ_S_EXT && hviprio) {
            siid[scount] = IRQ_S_EXT;
            siprio[scount] = hviprio;
            scount++;
        }
    }

    if (env->hvictl & HVICTL_VTI) {
        /* VTI: the interrupt is entirely described by hvictl. */
        if (hviid != IRQ_S_EXT) {
            siid[scount] = hviid;
            siprio[scount] = hviprio;
            scount++;
        }
    } else {
        /* Otherwise use the highest pending VS-level interrupt. */
        irq = riscv_cpu_vsirq_pending(env);
        if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) {
            siid[scount] = irq;
            siprio[scount] = env->hviprio[irq];
            scount++;
        }
    }

    /* Pick the candidate with the lowest (i.e. most urgent) priority. */
    iid = 0;
    iprio = UINT_MAX;
    for (s = 0; s < scount; s++) {
        if (siprio[s] < iprio) {
            iid = siid[s];
            iprio = siprio[s];
        }
    }

    if (iid) {
        if (env->hvictl & HVICTL_IPRIOM) {
            /* IPRIOM=1: report the real priority, clamped to the max. */
            if (iprio > IPRIO_MMAXIPRIO) {
                iprio = IPRIO_MMAXIPRIO;
            }
            if (!iprio) {
                /*
                 * Priority 0 with a lower-than-SEI default priority is
                 * reported as the maximum priority number.
                 */
                if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) {
                    iprio = IPRIO_MMAXIPRIO;
                }
            }
        } else {
            /* IPRIOM=0: priority always reads as 1. */
            iprio = 1;
        }
    } else {
        iprio = 0;
    }

    *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT;
    *val |= iprio;

    return RISCV_EXCP_NONE;
}
4528
/*
 * Read stopi (AIA): report the highest pending-and-enabled S-level
 * interrupt as {IID, IPRIO}, or zero if none.  With V=1, stopi is
 * substituted by vstopi.
 */
static RISCVException read_stopi(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    int irq;
    uint8_t iprio;

    if (env->virt_enabled) {
        return read_vstopi(env, CSR_VSTOPI, val);
    }

    irq = riscv_cpu_sirq_pending(env);
    if (irq <= 0 || irq > 63) {
        *val = 0;
    } else {
        iprio = env->siprio[irq];
        if (!iprio) {
            /*
             * Priority 0 with a lower-than-default-S priority is reported
             * as the maximum priority number.
             */
            if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) {
                iprio = IPRIO_MMAXIPRIO;
            }
        }
        *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
        *val |= iprio;
    }

    return RISCV_EXCP_NONE;
}
4555
4556 /* Hypervisor Extensions */
read_hstatus(CPURISCVState * env,int csrno,target_ulong * val)4557 static RISCVException read_hstatus(CPURISCVState *env, int csrno,
4558 target_ulong *val)
4559 {
4560 *val = env->hstatus;
4561 if (riscv_cpu_mxl(env) != MXL_RV32) {
4562 /* We only support 64-bit VSXL */
4563 *val = set_field(*val, HSTATUS_VSXL, 2);
4564 }
4565 /* We only support little endian */
4566 *val = set_field(*val, HSTATUS_VSBE, 0);
4567 return RISCV_EXCP_NONE;
4568 }
4569
write_hstatus(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4570 static RISCVException write_hstatus(CPURISCVState *env, int csrno,
4571 target_ulong val, uintptr_t ra)
4572 {
4573 uint64_t mask = (target_ulong)-1;
4574 if (!env_archcpu(env)->cfg.ext_svukte) {
4575 mask &= ~HSTATUS_HUKTE;
4576 }
4577 /* Update PMM field only if the value is valid according to Zjpm v1.0 */
4578 if (!env_archcpu(env)->cfg.ext_ssnpm ||
4579 riscv_cpu_mxl(env) != MXL_RV64 ||
4580 get_field(val, HSTATUS_HUPMM) == PMM_FIELD_RESERVED) {
4581 mask &= ~HSTATUS_HUPMM;
4582 }
4583 env->hstatus = (env->hstatus & ~mask) | (val & mask);
4584
4585 if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
4586 qemu_log_mask(LOG_UNIMP,
4587 "QEMU does not support mixed HSXLEN options.");
4588 }
4589 if (get_field(val, HSTATUS_VSBE) != 0) {
4590 qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests.");
4591 }
4592 return RISCV_EXCP_NONE;
4593 }
4594
read_hedeleg(CPURISCVState * env,int csrno,target_ulong * val)4595 static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
4596 target_ulong *val)
4597 {
4598 *val = env->hedeleg;
4599 return RISCV_EXCP_NONE;
4600 }
4601
write_hedeleg(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4602 static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
4603 target_ulong val, uintptr_t ra)
4604 {
4605 env->hedeleg = val & vs_delegable_excps;
4606 return RISCV_EXCP_NONE;
4607 }
4608
read_hedelegh(CPURISCVState * env,int csrno,target_ulong * val)4609 static RISCVException read_hedelegh(CPURISCVState *env, int csrno,
4610 target_ulong *val)
4611 {
4612 RISCVException ret;
4613 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
4614 if (ret != RISCV_EXCP_NONE) {
4615 return ret;
4616 }
4617
4618 /* Reserved, now read zero */
4619 *val = 0;
4620 return RISCV_EXCP_NONE;
4621 }
4622
write_hedelegh(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4623 static RISCVException write_hedelegh(CPURISCVState *env, int csrno,
4624 target_ulong val, uintptr_t ra)
4625 {
4626 RISCVException ret;
4627 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13);
4628 if (ret != RISCV_EXCP_NONE) {
4629 return ret;
4630 }
4631
4632 /* Reserved, now write ignore */
4633 return RISCV_EXCP_NONE;
4634 }
4635
rmw_hvien64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)4636 static RISCVException rmw_hvien64(CPURISCVState *env, int csrno,
4637 uint64_t *ret_val,
4638 uint64_t new_val, uint64_t wr_mask)
4639 {
4640 uint64_t mask = wr_mask & hvien_writable_mask;
4641
4642 if (ret_val) {
4643 *ret_val = env->hvien;
4644 }
4645
4646 env->hvien = (env->hvien & ~mask) | (new_val & mask);
4647
4648 return RISCV_EXCP_NONE;
4649 }
4650
rmw_hvien(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4651 static RISCVException rmw_hvien(CPURISCVState *env, int csrno,
4652 target_ulong *ret_val,
4653 target_ulong new_val, target_ulong wr_mask)
4654 {
4655 uint64_t rval;
4656 RISCVException ret;
4657
4658 ret = rmw_hvien64(env, csrno, &rval, new_val, wr_mask);
4659 if (ret_val) {
4660 *ret_val = rval;
4661 }
4662
4663 return ret;
4664 }
4665
rmw_hvienh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4666 static RISCVException rmw_hvienh(CPURISCVState *env, int csrno,
4667 target_ulong *ret_val,
4668 target_ulong new_val, target_ulong wr_mask)
4669 {
4670 uint64_t rval;
4671 RISCVException ret;
4672
4673 ret = rmw_hvien64(env, csrno, &rval,
4674 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4675 if (ret_val) {
4676 *ret_val = rval >> 32;
4677 }
4678
4679 return ret;
4680 }
4681
rmw_hideleg64(CPURISCVState * env,int csrno,uint64_t * ret_val,uint64_t new_val,uint64_t wr_mask)4682 static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno,
4683 uint64_t *ret_val,
4684 uint64_t new_val, uint64_t wr_mask)
4685 {
4686 uint64_t mask = wr_mask & vs_delegable_ints;
4687
4688 if (ret_val) {
4689 *ret_val = env->hideleg & vs_delegable_ints;
4690 }
4691
4692 env->hideleg = (env->hideleg & ~mask) | (new_val & mask);
4693 return RISCV_EXCP_NONE;
4694 }
4695
rmw_hideleg(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4696 static RISCVException rmw_hideleg(CPURISCVState *env, int csrno,
4697 target_ulong *ret_val,
4698 target_ulong new_val, target_ulong wr_mask)
4699 {
4700 uint64_t rval;
4701 RISCVException ret;
4702
4703 ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask);
4704 if (ret_val) {
4705 *ret_val = rval;
4706 }
4707
4708 return ret;
4709 }
4710
rmw_hidelegh(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4711 static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno,
4712 target_ulong *ret_val,
4713 target_ulong new_val, target_ulong wr_mask)
4714 {
4715 uint64_t rval;
4716 RISCVException ret;
4717
4718 ret = rmw_hideleg64(env, csrno, &rval,
4719 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4720 if (ret_val) {
4721 *ret_val = rval >> 32;
4722 }
4723
4724 return ret;
4725 }
4726
4727 /*
4728 * The function is written for two use-cases:
4729 * 1- To access hvip csr as is for HS-mode access.
4730 * 2- To access vsip as a combination of hvip, and mip for vs-mode.
4731 *
4732 * Both report bits 2, 6, 10 and 13:63.
4733 * vsip needs to be read-only zero when both hideleg[i] and
4734 * hvien[i] are zero.
4735 */
static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t old_hvip;
    uint64_t ret_mip;

    /*
     * For bits 10, 6 and 2, vsip[i] is an alias of hip[i]. These bits are
     * present in hip, hvip and mip. Where mip[i] is alias of hip[i] and hvip[i]
     * is OR'ed in hip[i] to inject virtual interrupts from hypervisor. These
     * bits are actually being maintained in mip so we read them from there.
     * This way we have a single source of truth and allows for easier
     * implementation.
     *
     * For bits 13:63 we have:
     *
     * hideleg[i]  hvien[i]
     *   0           0      No delegation. vsip[i] readonly zero.
     *   0           1      vsip[i] is alias of hvip[i], sip bypassed.
     *   1           X      vsip[i] is alias of sip[i], hvip bypassed.
     *
     * alias_mask denotes the bits that come from sip (mip here given we
     * maintain all bits there). nalias_mask denotes bits that come from
     * hvip.
     */
    uint64_t alias_mask = (env->hideleg | ~env->hvien) | VS_MODE_INTERRUPTS;
    uint64_t nalias_mask = (~env->hideleg & env->hvien);
    uint64_t wr_mask_hvip;
    uint64_t wr_mask_mip;

    /*
     * Both alias and non-alias mask remain same for vsip except:
     * 1- For VS* bits if they are zero in hideleg.
     * 2- For 13:63 bits if they are zero in both hideleg and hvien.
     */
    if (csrno == CSR_VSIP) {
        /* zero-out VS* bits that are not delegated to VS mode. */
        alias_mask &= (env->hideleg | ~VS_MODE_INTERRUPTS);

        /*
         * zero-out 13:63 bits that are zero in both hideleg and hvien.
         * nalias_mask mask can not contain any VS* bits so only second
         * condition applies on it.
         */
        nalias_mask &= (env->hideleg | env->hvien);
        alias_mask &= (env->hideleg | env->hvien);
    }

    wr_mask_hvip = wr_mask & nalias_mask & hvip_writable_mask;
    wr_mask_mip = wr_mask & alias_mask & hvip_writable_mask;

    /* Aliased bits, bits 10, 6, 2 need to come from mip. */
    ret = rmw_mip64(env, csrno, &ret_mip, new_val, wr_mask_mip);
    if (ret != RISCV_EXCP_NONE) {
        /* NB: *ret_val is intentionally left unwritten on failure. */
        return ret;
    }

    /* Snapshot hvip before updating so reads return the old value. */
    old_hvip = env->hvip;

    if (wr_mask_hvip) {
        env->hvip = (env->hvip & ~wr_mask_hvip) | (new_val & wr_mask_hvip);

        /*
         * Given hvip is separate source from mip, we need to trigger interrupt
         * from here separately. Normally this happen from riscv_cpu_update_mip.
         */
        riscv_cpu_interrupt(env);
    }

    if (ret_val) {
        /* Only take VS* bits from mip. */
        ret_mip &= alias_mask;

        /* Take in non-delegated 13:63 bits from hvip. */
        old_hvip &= nalias_mask;

        *ret_val = ret_mip | old_hvip;
    }

    return ret;
}
4819
rmw_hvip(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4820 static RISCVException rmw_hvip(CPURISCVState *env, int csrno,
4821 target_ulong *ret_val,
4822 target_ulong new_val, target_ulong wr_mask)
4823 {
4824 uint64_t rval;
4825 RISCVException ret;
4826
4827 ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask);
4828 if (ret_val) {
4829 *ret_val = rval;
4830 }
4831
4832 return ret;
4833 }
4834
rmw_hviph(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4835 static RISCVException rmw_hviph(CPURISCVState *env, int csrno,
4836 target_ulong *ret_val,
4837 target_ulong new_val, target_ulong wr_mask)
4838 {
4839 uint64_t rval;
4840 RISCVException ret;
4841
4842 ret = rmw_hvip64(env, csrno, &rval,
4843 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
4844 if (ret_val) {
4845 *ret_val = rval >> 32;
4846 }
4847
4848 return ret;
4849 }
4850
rmw_hip(CPURISCVState * env,int csrno,target_ulong * ret_value,target_ulong new_value,target_ulong write_mask)4851 static RISCVException rmw_hip(CPURISCVState *env, int csrno,
4852 target_ulong *ret_value,
4853 target_ulong new_value, target_ulong write_mask)
4854 {
4855 int ret = rmw_mip(env, csrno, ret_value, new_value,
4856 write_mask & hip_writable_mask);
4857
4858 if (ret_value) {
4859 *ret_value &= HS_MODE_INTERRUPTS;
4860 }
4861 return ret;
4862 }
4863
rmw_hie(CPURISCVState * env,int csrno,target_ulong * ret_val,target_ulong new_val,target_ulong wr_mask)4864 static RISCVException rmw_hie(CPURISCVState *env, int csrno,
4865 target_ulong *ret_val,
4866 target_ulong new_val, target_ulong wr_mask)
4867 {
4868 uint64_t rval;
4869 RISCVException ret;
4870
4871 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS);
4872 if (ret_val) {
4873 *ret_val = rval & HS_MODE_INTERRUPTS;
4874 }
4875
4876 return ret;
4877 }
4878
read_hcounteren(CPURISCVState * env,int csrno,target_ulong * val)4879 static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
4880 target_ulong *val)
4881 {
4882 *val = env->hcounteren;
4883 return RISCV_EXCP_NONE;
4884 }
4885
write_hcounteren(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4886 static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
4887 target_ulong val, uintptr_t ra)
4888 {
4889 RISCVCPU *cpu = env_archcpu(env);
4890
4891 /* WARL register - disable unavailable counters */
4892 env->hcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
4893 COUNTEREN_IR);
4894 return RISCV_EXCP_NONE;
4895 }
4896
read_hgeie(CPURISCVState * env,int csrno,target_ulong * val)4897 static RISCVException read_hgeie(CPURISCVState *env, int csrno,
4898 target_ulong *val)
4899 {
4900 if (val) {
4901 *val = env->hgeie;
4902 }
4903 return RISCV_EXCP_NONE;
4904 }
4905
/*
 * Write hgeie (hypervisor guest external interrupt enables) and refresh
 * the derived mip.SGEIP bit.
 */
static RISCVException write_hgeie(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
    /* Only GEILEN:1 bits implemented and BIT0 is never implemented */
    val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
    env->hgeie = val;
    /* Update mip.SGEIP bit: pending iff any enabled guest IRQ is pending. */
    riscv_cpu_update_mip(env, MIP_SGEIP,
                         BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    return RISCV_EXCP_NONE;
}
4917
read_htval(CPURISCVState * env,int csrno,target_ulong * val)4918 static RISCVException read_htval(CPURISCVState *env, int csrno,
4919 target_ulong *val)
4920 {
4921 *val = env->htval;
4922 return RISCV_EXCP_NONE;
4923 }
4924
write_htval(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4925 static RISCVException write_htval(CPURISCVState *env, int csrno,
4926 target_ulong val, uintptr_t ra)
4927 {
4928 env->htval = val;
4929 return RISCV_EXCP_NONE;
4930 }
4931
read_htinst(CPURISCVState * env,int csrno,target_ulong * val)4932 static RISCVException read_htinst(CPURISCVState *env, int csrno,
4933 target_ulong *val)
4934 {
4935 *val = env->htinst;
4936 return RISCV_EXCP_NONE;
4937 }
4938
/*
 * htinst is only updated by hardware on trap entry; guest writes are
 * silently ignored (read-only-to-software behavior).
 */
static RISCVException write_htinst(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    return RISCV_EXCP_NONE;
}
4944
read_hgeip(CPURISCVState * env,int csrno,target_ulong * val)4945 static RISCVException read_hgeip(CPURISCVState *env, int csrno,
4946 target_ulong *val)
4947 {
4948 if (val) {
4949 *val = env->hgeip;
4950 }
4951 return RISCV_EXCP_NONE;
4952 }
4953
read_hgatp(CPURISCVState * env,int csrno,target_ulong * val)4954 static RISCVException read_hgatp(CPURISCVState *env, int csrno,
4955 target_ulong *val)
4956 {
4957 *val = env->hgatp;
4958 return RISCV_EXCP_NONE;
4959 }
4960
write_hgatp(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)4961 static RISCVException write_hgatp(CPURISCVState *env, int csrno,
4962 target_ulong val, uintptr_t ra)
4963 {
4964 env->hgatp = legalize_xatp(env, env->hgatp, val);
4965 return RISCV_EXCP_NONE;
4966 }
4967
read_htimedelta(CPURISCVState * env,int csrno,target_ulong * val)4968 static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
4969 target_ulong *val)
4970 {
4971 if (!env->rdtime_fn) {
4972 return RISCV_EXCP_ILLEGAL_INST;
4973 }
4974
4975 *val = env->htimedelta;
4976 return RISCV_EXCP_NONE;
4977 }
4978
/*
 * Write htimedelta (guest time offset).  On RV32 only the low 32 bits
 * are updated here (the high half goes through htimedeltah).  Changing
 * the delta shifts guest time, so the VS timer must be re-armed when
 * Sstc is available.
 */
static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
                                       target_ulong val, uintptr_t ra)
{
    /* Illegal without a platform time source. */
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
    } else {
        env->htimedelta = val;
    }

    /* Re-arm the VS timer against the shifted guest time. */
    if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) {
        riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
                                  env->htimedelta, MIP_VSTIP);
    }

    return RISCV_EXCP_NONE;
}
4999
read_htimedeltah(CPURISCVState * env,int csrno,target_ulong * val)5000 static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
5001 target_ulong *val)
5002 {
5003 if (!env->rdtime_fn) {
5004 return RISCV_EXCP_ILLEGAL_INST;
5005 }
5006
5007 *val = env->htimedelta >> 32;
5008 return RISCV_EXCP_NONE;
5009 }
5010
/*
 * Write htimedeltah (RV32 high half of htimedelta).  Changing the delta
 * shifts guest time, so the VS timer must be re-armed when Sstc is
 * available.
 */
static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
                                        target_ulong val, uintptr_t ra)
{
    /* Illegal without a platform time source. */
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);

    /* Re-arm the VS timer against the shifted guest time. */
    if (riscv_cpu_cfg(env)->ext_sstc && env->rdtime_fn) {
        riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
                                  env->htimedelta, MIP_VSTIP);
    }

    return RISCV_EXCP_NONE;
}
5027
/* hvictl: hypervisor virtual interrupt control (AIA). */
static RISCVException read_hvictl(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->hvictl;
    return RISCV_EXCP_NONE;
}

/* Only the architecturally valid hvictl bits are retained on write. */
static RISCVException write_hvictl(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    env->hvictl = val & HVICTL_VALID_MASK;
    return RISCV_EXCP_NONE;
}
5041
/*
 * Common reader for the hviprio1/1h/2/2h CSRs: packs one priority byte per
 * interrupt slot from @iprio into @val, starting at slot @first_index.
 */
static RISCVException read_hvipriox(CPURISCVState *env, int first_index,
                                    uint8_t *iprio, target_ulong *val)
{
    /* Each CSR holds 4 priority bytes per 32 bits of XLEN. */
    int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);

    /* First index has to be a multiple of number of irqs per register */
    if (first_index % num_irqs) {
        return (env->virt_enabled) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }

    /* Fill-up return value */
    *val = 0;
    for (i = 0; i < num_irqs; i++) {
        /* Unmapped slots and read-zero slots contribute nothing. */
        if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
            continue;
        }
        if (rdzero) {
            continue;
        }
        *val |= ((target_ulong)iprio[irq]) << (i * 8);
    }

    return RISCV_EXCP_NONE;
}

/*
 * Common writer for the hviprio1/1h/2/2h CSRs: unpacks one priority byte
 * per interrupt slot from @val into @iprio, starting at slot @first_index.
 */
static RISCVException write_hvipriox(CPURISCVState *env, int first_index,
                                     uint8_t *iprio, target_ulong val)
{
    int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);

    /* First index has to be a multiple of number of irqs per register */
    if (first_index % num_irqs) {
        return (env->virt_enabled) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }

    /* Fill-up priority array */
    for (i = 0; i < num_irqs; i++) {
        if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
            continue;
        }
        if (rdzero) {
            /* Read-zero slots are forced to priority 0. */
            iprio[irq] = 0;
        } else {
            iprio[irq] = (val >> (i * 8)) & 0xff;
        }
    }

    return RISCV_EXCP_NONE;
}
5093
/*
 * Per-CSR wrappers around read/write_hvipriox.  The second argument is the
 * first interrupt slot covered by each CSR (8 slots per 64-bit register,
 * split into low/high halves on RV32).
 */
static RISCVException read_hviprio1(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    return read_hvipriox(env, 0, env->hviprio, val);
}

static RISCVException write_hviprio1(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    return write_hvipriox(env, 0, env->hviprio, val);
}

static RISCVException read_hviprio1h(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    return read_hvipriox(env, 4, env->hviprio, val);
}

static RISCVException write_hviprio1h(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    return write_hvipriox(env, 4, env->hviprio, val);
}

static RISCVException read_hviprio2(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    return read_hvipriox(env, 8, env->hviprio, val);
}

static RISCVException write_hviprio2(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    return write_hvipriox(env, 8, env->hviprio, val);
}

static RISCVException read_hviprio2h(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    return read_hvipriox(env, 12, env->hviprio, val);
}

static RISCVException write_hviprio2h(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    return write_hvipriox(env, 12, env->hviprio, val);
}
5141
5142 /* Virtual CSR Registers */
/* vsstatus: virtual supervisor status register. */
static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->vsstatus;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    /* Note: on RV32 target_ulong is 32 bits, so the mask covers XLEN bits. */
    uint64_t mask = (target_ulong)-1;
    /* A write with UXL == 0 leaves the current UXL field unchanged (WARL). */
    if ((val & VSSTATUS64_UXL) == 0) {
        mask &= ~VSSTATUS64_UXL;
    }
    if ((env->henvcfg & HENVCFG_DTE)) {
        /* With double-trap enabled, setting SDT also clears SIE. */
        if ((val & SSTATUS_SDT) != 0) {
            val &= ~SSTATUS_SIE;
        }
    } else {
        /* SDT reads as zero when henvcfg.DTE is clear. */
        val &= ~SSTATUS_SDT;
    }
    env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
    return RISCV_EXCP_NONE;
}
5167
/* vstvec: virtual supervisor trap vector base address. */
static RISCVException read_vstvec(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstvec;
    return RISCV_EXCP_NONE;
}
5174
write_vstvec(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)5175 static RISCVException write_vstvec(CPURISCVState *env, int csrno,
5176 target_ulong val, uintptr_t ra)
5177 {
5178 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
5179 if ((val & 3) < 2) {
5180 env->vstvec = val;
5181 } else {
5182 qemu_log_mask(LOG_UNIMP, "CSR_VSTVEC: reserved mode not supported\n");
5183 }
5184 return RISCV_EXCP_NONE;
5185 }
5186
/* vsscratch: virtual supervisor scratch register (no side effects). */
static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->vsscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    env->vsscratch = val;
    return RISCV_EXCP_NONE;
}

/* vsepc: virtual supervisor exception program counter. */
static RISCVException read_vsepc(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vsepc;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsepc(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
    env->vsepc = val;
    return RISCV_EXCP_NONE;
}

/* vscause: virtual supervisor trap cause. */
static RISCVException read_vscause(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->vscause;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vscause(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    env->vscause = val;
    return RISCV_EXCP_NONE;
}

/* vstval: virtual supervisor trap value. */
static RISCVException read_vstval(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstval;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vstval(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    env->vstval = val;
    return RISCV_EXCP_NONE;
}

/* vsatp: virtual supervisor address translation and protection. */
static RISCVException read_vsatp(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vsatp;
    return RISCV_EXCP_NONE;
}

/* vsatp is WARL: the written value is legalized before being stored. */
static RISCVException write_vsatp(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
    env->vsatp = legalize_xatp(env, env->vsatp, val);
    return RISCV_EXCP_NONE;
}
5256
/* mtval2: machine trap value 2 (guest-page-fault address information). */
static RISCVException read_mtval2(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mtval2;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtval2(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    env->mtval2 = val;
    return RISCV_EXCP_NONE;
}

/* mtinst: machine trap instruction (transformed trapping instruction). */
static RISCVException read_mtinst(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mtinst;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtinst(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    env->mtinst = val;
    return RISCV_EXCP_NONE;
}
5284
5285 /* Physical Memory Protection */
/* mseccfg: machine security configuration; delegated to the PMP module. */
static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = mseccfg_csr_read(env);
    return RISCV_EXCP_NONE;
}

static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    mseccfg_csr_write(env, val);
    return RISCV_EXCP_NONE;
}

/* pmpcfg0..N: the register index is derived from the CSR number. */
static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    uint32_t reg_index = csrno - CSR_PMPCFG0;

    *val = pmpcfg_csr_read(env, reg_index);
    return RISCV_EXCP_NONE;
}

static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
                                   target_ulong val, uintptr_t ra)
{
    uint32_t reg_index = csrno - CSR_PMPCFG0;

    pmpcfg_csr_write(env, reg_index, val);
    return RISCV_EXCP_NONE;
}

/* pmpaddr0..N: the address-register index is derived from the CSR number. */
static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
    return RISCV_EXCP_NONE;
}

static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
    return RISCV_EXCP_NONE;
}
5331
/* tselect: debug trigger selector; delegated to the debug module. */
static RISCVException read_tselect(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = tselect_csr_read(env);
    return RISCV_EXCP_NONE;
}

static RISCVException write_tselect(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    tselect_csr_write(env, val);
    return RISCV_EXCP_NONE;
}
5345
/* tdata1..tdata3: data registers of the currently selected debug trigger. */
static RISCVException read_tdata(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    /* return 0 in tdata1 to end the trigger enumeration */
    if (env->trigger_cur >= RV_MAX_TRIGGERS && csrno == CSR_TDATA1) {
        *val = 0;
        return RISCV_EXCP_NONE;
    }

    if (!tdata_available(env, csrno - CSR_TDATA1)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = tdata_csr_read(env, csrno - CSR_TDATA1);
    return RISCV_EXCP_NONE;
}

static RISCVException write_tdata(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
    /* Writes to an unavailable tdata register trap. */
    if (!tdata_available(env, csrno - CSR_TDATA1)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    tdata_csr_write(env, csrno - CSR_TDATA1, val);
    return RISCV_EXCP_NONE;
}
5373
/* tinfo: read-only information about the selected debug trigger. */
static RISCVException read_tinfo(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = tinfo_csr_read(env);
    return RISCV_EXCP_NONE;
}

/* mcontext: machine context register for debug triggers. */
static RISCVException read_mcontext(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mcontext;
    return RISCV_EXCP_NONE;
}
5387
write_mcontext(CPURISCVState * env,int csrno,target_ulong val,uintptr_t ra)5388 static RISCVException write_mcontext(CPURISCVState *env, int csrno,
5389 target_ulong val, uintptr_t ra)
5390 {
5391 bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;
5392 int32_t mask;
5393
5394 if (riscv_has_ext(env, RVH)) {
5395 /* Spec suggest 7-bit for RV32 and 14-bit for RV64 w/ H extension */
5396 mask = rv32 ? MCONTEXT32_HCONTEXT : MCONTEXT64_HCONTEXT;
5397 } else {
5398 /* Spec suggest 6-bit for RV32 and 13-bit for RV64 w/o H extension */
5399 mask = rv32 ? MCONTEXT32 : MCONTEXT64;
5400 }
5401
5402 env->mcontext = val & mask;
5403 return RISCV_EXCP_NONE;
5404 }
5405
/* Smrnmi (resumable NMI) CSRs: mnscratch, mnepc, mncause, mnstatus. */
static RISCVException read_mnscratch(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->mnscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mnscratch(CPURISCVState *env, int csrno,
                                      target_ulong val, uintptr_t ra)
{
    env->mnscratch = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mnepc(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->mnepc;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mnepc(CPURISCVState *env, int csrno,
                                  target_ulong val, uintptr_t ra)
{
    env->mnepc = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mncause(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mncause;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mncause(CPURISCVState *env, int csrno,
                                    target_ulong val, uintptr_t ra)
{
    env->mncause = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mnstatus(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mnstatus;
    return RISCV_EXCP_NONE;
}
5454
/*
 * Write mnstatus.  Only NMIE and MNPP are writable (plus MNPV with the
 * hypervisor extension); NMIE can be set by software but never cleared.
 */
static RISCVException write_mnstatus(CPURISCVState *env, int csrno,
                                     target_ulong val, uintptr_t ra)
{
    target_ulong mask = (MNSTATUS_NMIE | MNSTATUS_MNPP);

    if (riscv_has_ext(env, RVH)) {
        /* Flush tlb on mnstatus fields that affect VM. */
        if ((val ^ env->mnstatus) & MNSTATUS_MNPV) {
            tlb_flush(env_cpu(env));
        }

        mask |= MNSTATUS_MNPV;
    }

    /* mnstatus.mnie can only be cleared by hardware. */
    env->mnstatus = (env->mnstatus & MNSTATUS_NMIE) | (val & mask);
    return RISCV_EXCP_NONE;
}
5473
5474 #endif
5475
5476 /* Crypto Extension */
/*
 * Produce a fresh value for the Zkr seed CSR: 16 bits of entropy plus the
 * OPST status field.  On entropy-source failure the value encodes DEAD.
 * @new_value and @write_mask are currently unused but kept for the rmw
 * accessor interface.
 */
target_ulong riscv_new_csr_seed(target_ulong new_value,
                                target_ulong write_mask)
{
    uint16_t random_v;
    Error *random_e = NULL;
    int random_r;
    target_ulong rval;

    random_r = qemu_guest_getrandom(&random_v, 2, &random_e);
    if (unlikely(random_r < 0)) {
        /*
         * Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return a
         * failure indication to the guest.  There is no reason
         * we know to expect the failure to be transitory, so
         * indicate DEAD to avoid having the guest spin on WAIT.
         */
        /* Trailing '\n' added for consistency with other log messages. */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s\n",
                      __func__, error_get_pretty(random_e));
        error_free(random_e);
        rval = SEED_OPST_DEAD;
    } else {
        rval = random_v | SEED_OPST_ES16;
    }

    return rval;
}
5504
rmw_seed(CPURISCVState * env,int csrno,target_ulong * ret_value,target_ulong new_value,target_ulong write_mask)5505 static RISCVException rmw_seed(CPURISCVState *env, int csrno,
5506 target_ulong *ret_value,
5507 target_ulong new_value,
5508 target_ulong write_mask)
5509 {
5510 target_ulong rval;
5511
5512 rval = riscv_new_csr_seed(new_value, write_mask);
5513
5514 if (ret_value) {
5515 *ret_value = rval;
5516 }
5517
5518 return RISCV_EXCP_NONE;
5519 }
5520
5521 /*
5522 * riscv_csrrw - read and/or update control and status register
5523 *
5524 * csrr <-> riscv_csrrw(env, csrno, ret_value, 0, 0);
5525 * csrrw <-> riscv_csrrw(env, csrno, ret_value, value, -1);
5526 * csrrs <-> riscv_csrrw(env, csrno, ret_value, -1, value);
5527 * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value);
5528 */
5529
/*
 * Validate a CSR access before performing it: extension availability,
 * CSR existence, spec-version gating, read-only protection, the CSR's
 * own predicate, and (system mode only) privilege-level checks.
 * Returns RISCV_EXCP_NONE when the access may proceed.
 */
static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
                                               int csrno,
                                               bool write)
{
    /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
    /* csrno[11:10] == 0b11 marks the CSR as architecturally read-only. */
    bool read_only = get_field(csrno, 0xC00) == 3;
    int csr_min_priv = csr_ops[csrno].min_priv_ver;

    /* ensure the CSR extension is enabled */
    if (!riscv_cpu_cfg(env)->ext_zicsr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* ensure CSR is implemented by checking predicate */
    if (!csr_ops[csrno].predicate) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* privileged spec version check */
    if (env->priv_ver < csr_min_priv) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* read / write check */
    if (write && read_only) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /*
     * The predicate() not only does existence check but also does some
     * access control check which triggers for example virtual instruction
     * exception in some cases. When writing read-only CSRs in those cases
     * illegal instruction exception should be triggered instead of virtual
     * instruction exception. Hence this comes after the read / write check.
     */
    RISCVException ret = csr_ops[csrno].predicate(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

#if !defined(CONFIG_USER_ONLY)
    int csr_priv, effective_priv = env->priv;

    if (riscv_has_ext(env, RVH) && env->priv == PRV_S &&
        !env->virt_enabled) {
        /*
         * We are in HS mode. Add 1 to the effective privilege level to
         * allow us to access the Hypervisor CSRs.
         */
        effective_priv++;
    }

    /* csrno[9:8] encodes the minimum privilege level of the CSR. */
    csr_priv = get_field(csrno, 0x300);
    if (!env->debugger && (effective_priv < csr_priv)) {
        /* Accesses from a guest to HS-level CSRs raise a virtual fault. */
        if (csr_priv <= (PRV_S + 1) && env->virt_enabled) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        return RISCV_EXCP_ILLEGAL_INST;
    }
#endif
    return RISCV_EXCP_NONE;
}
5592
/*
 * Perform a 64-bit (XLEN) CSR read-modify-write after all access checks
 * have passed.  Prefers a combined op() accessor when one exists;
 * otherwise composes the separate read()/write() accessors.
 */
static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
                                       target_ulong *ret_value,
                                       target_ulong new_value,
                                       target_ulong write_mask,
                                       uintptr_t ra)
{
    RISCVException ret;
    target_ulong old_value = 0;

    /* execute combined read/write operation if it exists */
    if (csr_ops[csrno].op) {
        return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
    }

    /*
     * ret_value == NULL means that rd=x0 and we're coming from helper_csrw()
     * and we can't throw side effects caused by CSR reads.
     */
    if (ret_value) {
        /* if no accessor exists then return failure */
        if (!csr_ops[csrno].read) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
        /* read old value */
        ret = csr_ops[csrno].read(env, csrno, &old_value);
        if (ret != RISCV_EXCP_NONE) {
            return ret;
        }
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (write_mask) {
        /* Merge masked new bits into the (possibly skipped-read) old value. */
        new_value = (old_value & ~write_mask) | (new_value & write_mask);
        if (csr_ops[csrno].write) {
            ret = csr_ops[csrno].write(env, csrno, new_value, ra);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}
5641
/* Pure CSR read (csrr): check access for read, then read with no write. */
RISCVException riscv_csrr(CPURISCVState *env, int csrno,
                          target_ulong *ret_value)
{
    RISCVException ret = riscv_csrrw_check(env, csrno, false);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    return riscv_csrrw_do64(env, csrno, ret_value, 0, 0, 0);
}
5652
/*
 * CSR read/write entry point (csrrw/csrrs/csrrc): check access for write,
 * then perform the masked read-modify-write.
 */
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value, target_ulong new_value,
                           target_ulong write_mask, uintptr_t ra)
{
    RISCVException ret = riscv_csrrw_check(env, csrno, true);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask, ra);
}
5664
/*
 * 128-bit CSR read-modify-write for RV128.  Requires a read128() accessor;
 * falls back to the 64-bit write() (low half only) when no write128()
 * exists.
 */
static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
                                        Int128 *ret_value,
                                        Int128 new_value,
                                        Int128 write_mask, uintptr_t ra)
{
    RISCVException ret;
    Int128 old_value;

    /* read old value */
    ret = csr_ops[csrno].read128(env, csrno, &old_value);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (int128_nz(write_mask)) {
        new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
                              int128_and(new_value, write_mask));
        if (csr_ops[csrno].write128) {
            ret = csr_ops[csrno].write128(env, csrno, new_value);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        } else if (csr_ops[csrno].write) {
            /* avoids having to write wrappers for all registers */
            ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value), ra);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}
5704
/* 128-bit CSR read for RV128, with a 64-bit fallback path. */
RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
                               Int128 *ret_value)
{
    RISCVException ret;

    ret = riscv_csrrw_check(env, csrno, false);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (csr_ops[csrno].read128) {
        return riscv_csrrw_do128(env, csrno, ret_value,
                                 int128_zero(), int128_zero(), 0);
    }

    /*
     * Fall back to 64-bit version for now, if the 128-bit alternative isn't
     * at all defined.
     * Note, some CSRs don't need to extend to MXLEN (64 upper bits non
     * significant), for those, this fallback is correctly handling the
     * accesses
     */
    target_ulong old_value;
    ret = riscv_csrrw_do64(env, csrno, &old_value, 0, 0, 0);
    if (ret == RISCV_EXCP_NONE && ret_value) {
        /* Zero-extend the 64-bit result into the 128-bit return value. */
        *ret_value = int128_make64(old_value);
    }
    return ret;
}
5734
/* 128-bit CSR read/write for RV128, with a 64-bit fallback path. */
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value, Int128 new_value,
                                Int128 write_mask, uintptr_t ra)
{
    RISCVException ret;

    ret = riscv_csrrw_check(env, csrno, true);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (csr_ops[csrno].read128) {
        return riscv_csrrw_do128(env, csrno, ret_value,
                                 new_value, write_mask, ra);
    }

    /*
     * Fall back to 64-bit version for now, if the 128-bit alternative isn't
     * at all defined.
     * Note, some CSRs don't need to extend to MXLEN (64 upper bits non
     * significant), for those, this fallback is correctly handling the
     * accesses
     */
    target_ulong old_value;
    ret = riscv_csrrw_do64(env, csrno, &old_value,
                           int128_getlo(new_value),
                           int128_getlo(write_mask), ra);
    if (ret == RISCV_EXCP_NONE && ret_value) {
        /* Zero-extend the 64-bit result into the 128-bit return value. */
        *ret_value = int128_make64(old_value);
    }
    return ret;
}
5767
5768 /*
5769 * Debugger support. If not in user mode, set env->debugger before the
5770 * riscv_csrrw call and clear it after the call.
5771 */
/*
 * Debugger CSR access: env->debugger is set around the call so the
 * privilege checks in riscv_csrrw_check() are bypassed.  A zero write
 * mask turns the access into a pure read.
 */
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask)
{
    RISCVException ret;
#if !defined(CONFIG_USER_ONLY)
    env->debugger = true;
#endif
    if (!write_mask) {
        ret = riscv_csrr(env, csrno, ret_value);
    } else {
        ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask, 0);
    }
#if !defined(CONFIG_USER_ONLY)
    env->debugger = false;
#endif
    return ret;
}
5791
/* jvt: Zcmt table jump base vector and control register. */
static RISCVException read_jvt(CPURISCVState *env, int csrno,
                               target_ulong *val)
{
    *val = env->jvt;
    return RISCV_EXCP_NONE;
}

static RISCVException write_jvt(CPURISCVState *env, int csrno,
                                target_ulong val, uintptr_t ra)
{
    env->jvt = val;
    return RISCV_EXCP_NONE;
}
5805
5806 /*
5807 * Control and Status Register function table
5808 * riscv_csr_operations::predicate() must be provided for an implemented CSR
5809 */
5810 riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
5811 /* User Floating-Point CSRs */
5812 [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
5813 [CSR_FRM] = { "frm", fs, read_frm, write_frm },
5814 [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr },
5815 /* Vector CSRs */
5816 [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart },
5817 [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat },
5818 [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm },
5819 [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr },
5820 [CSR_VL] = { "vl", vs, read_vl },
5821 [CSR_VTYPE] = { "vtype", vs, read_vtype },
5822 [CSR_VLENB] = { "vlenb", vs, read_vlenb },
5823 /* User Timers and Counters */
5824 [CSR_CYCLE] = { "cycle", ctr, read_hpmcounter },
5825 [CSR_INSTRET] = { "instret", ctr, read_hpmcounter },
5826 [CSR_CYCLEH] = { "cycleh", ctr32, read_hpmcounterh },
5827 [CSR_INSTRETH] = { "instreth", ctr32, read_hpmcounterh },
5828
5829 /*
5830 * In privileged mode, the monitor will have to emulate TIME CSRs only if
5831 * rdtime callback is not provided by machine/platform emulation.
5832 */
5833 [CSR_TIME] = { "time", ctr, read_time },
5834 [CSR_TIMEH] = { "timeh", ctr32, read_timeh },
5835
5836 /* Crypto Extension */
5837 [CSR_SEED] = { "seed", seed, NULL, NULL, rmw_seed },
5838
5839 /* Zcmt Extension */
5840 [CSR_JVT] = {"jvt", zcmt, read_jvt, write_jvt},
5841
5842 /* zicfiss Extension, shadow stack register */
5843 [CSR_SSP] = { "ssp", cfi_ss, read_ssp, write_ssp },
5844
5845 #if !defined(CONFIG_USER_ONLY)
5846 /* Machine Timers and Counters */
5847 [CSR_MCYCLE] = { "mcycle", any, read_hpmcounter,
5848 write_mhpmcounter },
5849 [CSR_MINSTRET] = { "minstret", any, read_hpmcounter,
5850 write_mhpmcounter },
5851 [CSR_MCYCLEH] = { "mcycleh", any32, read_hpmcounterh,
5852 write_mhpmcounterh },
5853 [CSR_MINSTRETH] = { "minstreth", any32, read_hpmcounterh,
5854 write_mhpmcounterh },
5855
5856 /* Machine Information Registers */
5857 [CSR_MVENDORID] = { "mvendorid", any, read_mvendorid },
5858 [CSR_MARCHID] = { "marchid", any, read_marchid },
5859 [CSR_MIMPID] = { "mimpid", any, read_mimpid },
5860 [CSR_MHARTID] = { "mhartid", any, read_mhartid },
5861
5862 [CSR_MCONFIGPTR] = { "mconfigptr", any, read_zero,
5863 .min_priv_ver = PRIV_VERSION_1_12_0 },
5864 /* Machine Trap Setup */
5865 [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus,
5866 NULL, read_mstatus_i128 },
5867 [CSR_MISA] = { "misa", any, read_misa, write_misa,
5868 NULL, read_misa_i128 },
5869 [CSR_MIDELEG] = { "mideleg", smode, NULL, NULL, rmw_mideleg },
5870 [CSR_MEDELEG] = { "medeleg", smode, read_medeleg, write_medeleg },
5871 [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie },
5872 [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec },
5873 [CSR_MCOUNTEREN] = { "mcounteren", umode, read_mcounteren,
5874 write_mcounteren },
5875
5876 [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush,
5877 write_mstatush },
5878 [CSR_MEDELEGH] = { "medelegh", smode32, read_zero, write_ignore,
5879 .min_priv_ver = PRIV_VERSION_1_13_0 },
5880 [CSR_HEDELEGH] = { "hedelegh", hmode32, read_hedelegh, write_hedelegh,
5881 .min_priv_ver = PRIV_VERSION_1_13_0 },
5882
5883 /* Machine Trap Handling */
5884 [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch,
5885 NULL, read_mscratch_i128, write_mscratch_i128 },
5886 [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc },
5887 [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause },
5888 [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval },
5889 [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
5890
5891 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */
5892 [CSR_MISELECT] = { "miselect", csrind_or_aia_any, NULL, NULL,
5893 rmw_xiselect },
5894 [CSR_MIREG] = { "mireg", csrind_or_aia_any, NULL, NULL,
5895 rmw_xireg },
5896
5897 /* Machine Indirect Register Alias */
5898 [CSR_MIREG2] = { "mireg2", csrind_any, NULL, NULL, rmw_xiregi,
5899 .min_priv_ver = PRIV_VERSION_1_12_0 },
5900 [CSR_MIREG3] = { "mireg3", csrind_any, NULL, NULL, rmw_xiregi,
5901 .min_priv_ver = PRIV_VERSION_1_12_0 },
5902 [CSR_MIREG4] = { "mireg4", csrind_any, NULL, NULL, rmw_xiregi,
5903 .min_priv_ver = PRIV_VERSION_1_12_0 },
5904 [CSR_MIREG5] = { "mireg5", csrind_any, NULL, NULL, rmw_xiregi,
5905 .min_priv_ver = PRIV_VERSION_1_12_0 },
5906 [CSR_MIREG6] = { "mireg6", csrind_any, NULL, NULL, rmw_xiregi,
5907 .min_priv_ver = PRIV_VERSION_1_12_0 },
5908
5909 /* Machine-Level Interrupts (AIA) */
5910 [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
5911 [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },
5912
5913 /* Virtual Interrupts for Supervisor Level (AIA) */
5914 [CSR_MVIEN] = { "mvien", aia_any, NULL, NULL, rmw_mvien },
5915 [CSR_MVIP] = { "mvip", aia_any, NULL, NULL, rmw_mvip },
5916
5917 /* Machine-Level High-Half CSRs (AIA) */
5918 [CSR_MIDELEGH] = { "midelegh", aia_smode32, NULL, NULL, rmw_midelegh },
5919 [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh },
5920 [CSR_MVIENH] = { "mvienh", aia_any32, NULL, NULL, rmw_mvienh },
5921 [CSR_MVIPH] = { "mviph", aia_any32, NULL, NULL, rmw_mviph },
5922 [CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph },
5923
5924 /* Execution environment configuration */
5925 [CSR_MENVCFG] = { "menvcfg", umode, read_menvcfg, write_menvcfg,
5926 .min_priv_ver = PRIV_VERSION_1_12_0 },
5927 [CSR_MENVCFGH] = { "menvcfgh", umode32, read_menvcfgh, write_menvcfgh,
5928 .min_priv_ver = PRIV_VERSION_1_12_0 },
5929 [CSR_SENVCFG] = { "senvcfg", smode, read_senvcfg, write_senvcfg,
5930 .min_priv_ver = PRIV_VERSION_1_12_0 },
5931 [CSR_HENVCFG] = { "henvcfg", hmode, read_henvcfg, write_henvcfg,
5932 .min_priv_ver = PRIV_VERSION_1_12_0 },
5933 [CSR_HENVCFGH] = { "henvcfgh", hmode32, read_henvcfgh, write_henvcfgh,
5934 .min_priv_ver = PRIV_VERSION_1_12_0 },
5935
5936 /* Smstateen extension CSRs */
5937 [CSR_MSTATEEN0] = { "mstateen0", mstateen, read_mstateen, write_mstateen0,
5938 .min_priv_ver = PRIV_VERSION_1_12_0 },
5939 [CSR_MSTATEEN0H] = { "mstateen0h", mstateen, read_mstateenh,
5940 write_mstateen0h,
5941 .min_priv_ver = PRIV_VERSION_1_12_0 },
5942 [CSR_MSTATEEN1] = { "mstateen1", mstateen, read_mstateen,
5943 write_mstateen_1_3,
5944 .min_priv_ver = PRIV_VERSION_1_12_0 },
5945 [CSR_MSTATEEN1H] = { "mstateen1h", mstateen, read_mstateenh,
5946 write_mstateenh_1_3,
5947 .min_priv_ver = PRIV_VERSION_1_12_0 },
5948 [CSR_MSTATEEN2] = { "mstateen2", mstateen, read_mstateen,
5949 write_mstateen_1_3,
5950 .min_priv_ver = PRIV_VERSION_1_12_0 },
5951 [CSR_MSTATEEN2H] = { "mstateen2h", mstateen, read_mstateenh,
5952 write_mstateenh_1_3,
5953 .min_priv_ver = PRIV_VERSION_1_12_0 },
5954 [CSR_MSTATEEN3] = { "mstateen3", mstateen, read_mstateen,
5955 write_mstateen_1_3,
5956 .min_priv_ver = PRIV_VERSION_1_12_0 },
5957 [CSR_MSTATEEN3H] = { "mstateen3h", mstateen, read_mstateenh,
5958 write_mstateenh_1_3,
5959 .min_priv_ver = PRIV_VERSION_1_12_0 },
5960 [CSR_HSTATEEN0] = { "hstateen0", hstateen, read_hstateen, write_hstateen0,
5961 .min_priv_ver = PRIV_VERSION_1_12_0 },
5962 [CSR_HSTATEEN0H] = { "hstateen0h", hstateenh, read_hstateenh,
5963 write_hstateen0h,
5964 .min_priv_ver = PRIV_VERSION_1_12_0 },
5965 [CSR_HSTATEEN1] = { "hstateen1", hstateen, read_hstateen,
5966 write_hstateen_1_3,
5967 .min_priv_ver = PRIV_VERSION_1_12_0 },
5968 [CSR_HSTATEEN1H] = { "hstateen1h", hstateenh, read_hstateenh,
5969 write_hstateenh_1_3,
5970 .min_priv_ver = PRIV_VERSION_1_12_0 },
5971 [CSR_HSTATEEN2] = { "hstateen2", hstateen, read_hstateen,
5972 write_hstateen_1_3,
5973 .min_priv_ver = PRIV_VERSION_1_12_0 },
5974 [CSR_HSTATEEN2H] = { "hstateen2h", hstateenh, read_hstateenh,
5975 write_hstateenh_1_3,
5976 .min_priv_ver = PRIV_VERSION_1_12_0 },
5977 [CSR_HSTATEEN3] = { "hstateen3", hstateen, read_hstateen,
5978 write_hstateen_1_3,
5979 .min_priv_ver = PRIV_VERSION_1_12_0 },
5980 [CSR_HSTATEEN3H] = { "hstateen3h", hstateenh, read_hstateenh,
5981 write_hstateenh_1_3,
5982 .min_priv_ver = PRIV_VERSION_1_12_0 },
5983 [CSR_SSTATEEN0] = { "sstateen0", sstateen, read_sstateen, write_sstateen0,
5984 .min_priv_ver = PRIV_VERSION_1_12_0 },
5985 [CSR_SSTATEEN1] = { "sstateen1", sstateen, read_sstateen,
5986 write_sstateen_1_3,
5987 .min_priv_ver = PRIV_VERSION_1_12_0 },
5988 [CSR_SSTATEEN2] = { "sstateen2", sstateen, read_sstateen,
5989 write_sstateen_1_3,
5990 .min_priv_ver = PRIV_VERSION_1_12_0 },
5991 [CSR_SSTATEEN3] = { "sstateen3", sstateen, read_sstateen,
5992 write_sstateen_1_3,
5993 .min_priv_ver = PRIV_VERSION_1_12_0 },
5994
5995 /* RNMI */
5996 [CSR_MNSCRATCH] = { "mnscratch", rnmi, read_mnscratch, write_mnscratch,
5997 .min_priv_ver = PRIV_VERSION_1_12_0 },
5998 [CSR_MNEPC] = { "mnepc", rnmi, read_mnepc, write_mnepc,
5999 .min_priv_ver = PRIV_VERSION_1_12_0 },
6000 [CSR_MNCAUSE] = { "mncause", rnmi, read_mncause, write_mncause,
6001 .min_priv_ver = PRIV_VERSION_1_12_0 },
6002 [CSR_MNSTATUS] = { "mnstatus", rnmi, read_mnstatus, write_mnstatus,
6003 .min_priv_ver = PRIV_VERSION_1_12_0 },
6004
6005 /* Supervisor Counter Delegation */
6006 [CSR_SCOUNTINHIBIT] = {"scountinhibit", scountinhibit_pred,
6007 read_scountinhibit, write_scountinhibit,
6008 .min_priv_ver = PRIV_VERSION_1_12_0 },
6009
6010 /* Supervisor Trap Setup */
6011 [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus,
6012 NULL, read_sstatus_i128 },
6013 [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie },
6014 [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec },
6015 [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren,
6016 write_scounteren },
6017
6018 /* Supervisor Trap Handling */
6019 [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch,
6020 NULL, read_sscratch_i128, write_sscratch_i128 },
6021 [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc },
6022 [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause },
6023 [CSR_STVAL] = { "stval", smode, read_stval, write_stval },
6024 [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip },
6025 [CSR_STIMECMP] = { "stimecmp", sstc, read_stimecmp, write_stimecmp,
6026 .min_priv_ver = PRIV_VERSION_1_12_0 },
6027 [CSR_STIMECMPH] = { "stimecmph", sstc_32, read_stimecmph, write_stimecmph,
6028 .min_priv_ver = PRIV_VERSION_1_12_0 },
6029 [CSR_VSTIMECMP] = { "vstimecmp", sstc, read_vstimecmp,
6030 write_vstimecmp,
6031 .min_priv_ver = PRIV_VERSION_1_12_0 },
6032 [CSR_VSTIMECMPH] = { "vstimecmph", sstc_32, read_vstimecmph,
6033 write_vstimecmph,
6034 .min_priv_ver = PRIV_VERSION_1_12_0 },
6035
6036 /* Supervisor Protection and Translation */
6037 [CSR_SATP] = { "satp", satp, read_satp, write_satp },
6038
6039 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
6040 [CSR_SISELECT] = { "siselect", csrind_or_aia_smode, NULL, NULL,
6041 rmw_xiselect },
6042 [CSR_SIREG] = { "sireg", csrind_or_aia_smode, NULL, NULL,
6043 rmw_xireg },
6044
6045 /* Supervisor Indirect Register Alias */
6046 [CSR_SIREG2] = { "sireg2", csrind_smode, NULL, NULL, rmw_xiregi,
6047 .min_priv_ver = PRIV_VERSION_1_12_0 },
6048 [CSR_SIREG3] = { "sireg3", csrind_smode, NULL, NULL, rmw_xiregi,
6049 .min_priv_ver = PRIV_VERSION_1_12_0 },
6050 [CSR_SIREG4] = { "sireg4", csrind_smode, NULL, NULL, rmw_xiregi,
6051 .min_priv_ver = PRIV_VERSION_1_12_0 },
6052 [CSR_SIREG5] = { "sireg5", csrind_smode, NULL, NULL, rmw_xiregi,
6053 .min_priv_ver = PRIV_VERSION_1_12_0 },
6054 [CSR_SIREG6] = { "sireg6", csrind_smode, NULL, NULL, rmw_xiregi,
6055 .min_priv_ver = PRIV_VERSION_1_12_0 },
6056
6057 /* Supervisor-Level Interrupts (AIA) */
6058 [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
6059 [CSR_STOPI] = { "stopi", aia_smode, read_stopi },
6060
6061 /* Supervisor-Level High-Half CSRs (AIA) */
6062 [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh },
6063 [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph },
6064
6065 [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus,
6066 .min_priv_ver = PRIV_VERSION_1_12_0 },
6067 [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg,
6068 .min_priv_ver = PRIV_VERSION_1_12_0 },
6069 [CSR_HIDELEG] = { "hideleg", hmode, NULL, NULL, rmw_hideleg,
6070 .min_priv_ver = PRIV_VERSION_1_12_0 },
6071 [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip,
6072 .min_priv_ver = PRIV_VERSION_1_12_0 },
6073 [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip,
6074 .min_priv_ver = PRIV_VERSION_1_12_0 },
6075 [CSR_HIE] = { "hie", hmode, NULL, NULL, rmw_hie,
6076 .min_priv_ver = PRIV_VERSION_1_12_0 },
6077 [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren,
6078 write_hcounteren,
6079 .min_priv_ver = PRIV_VERSION_1_12_0 },
6080 [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie,
6081 .min_priv_ver = PRIV_VERSION_1_12_0 },
6082 [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval,
6083 .min_priv_ver = PRIV_VERSION_1_12_0 },
6084 [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst,
6085 .min_priv_ver = PRIV_VERSION_1_12_0 },
6086 [CSR_HGEIP] = { "hgeip", hmode, read_hgeip,
6087 .min_priv_ver = PRIV_VERSION_1_12_0 },
6088 [CSR_HGATP] = { "hgatp", hgatp, read_hgatp, write_hgatp,
6089 .min_priv_ver = PRIV_VERSION_1_12_0 },
6090 [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta,
6091 write_htimedelta,
6092 .min_priv_ver = PRIV_VERSION_1_12_0 },
6093 [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah,
6094 write_htimedeltah,
6095 .min_priv_ver = PRIV_VERSION_1_12_0 },
6096
6097 [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus,
6098 write_vsstatus,
6099 .min_priv_ver = PRIV_VERSION_1_12_0 },
6100 [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip,
6101 .min_priv_ver = PRIV_VERSION_1_12_0 },
6102 [CSR_VSIE] = { "vsie", hmode, NULL, NULL, rmw_vsie ,
6103 .min_priv_ver = PRIV_VERSION_1_12_0 },
6104 [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec,
6105 .min_priv_ver = PRIV_VERSION_1_12_0 },
6106 [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch,
6107 write_vsscratch,
6108 .min_priv_ver = PRIV_VERSION_1_12_0 },
6109 [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc,
6110 .min_priv_ver = PRIV_VERSION_1_12_0 },
6111 [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause,
6112 .min_priv_ver = PRIV_VERSION_1_12_0 },
6113 [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval,
6114 .min_priv_ver = PRIV_VERSION_1_12_0 },
6115 [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp,
6116 .min_priv_ver = PRIV_VERSION_1_12_0 },
6117
6118 [CSR_MTVAL2] = { "mtval2", dbltrp_hmode, read_mtval2, write_mtval2,
6119 .min_priv_ver = PRIV_VERSION_1_12_0 },
6120 [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst,
6121 .min_priv_ver = PRIV_VERSION_1_12_0 },
6122
6123 /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
6124 [CSR_HVIEN] = { "hvien", aia_hmode, NULL, NULL, rmw_hvien },
6125 [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl,
6126 write_hvictl },
6127 [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1,
6128 write_hviprio1 },
6129 [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2,
6130 write_hviprio2 },
6131 /*
6132 * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
6133 */
6134 [CSR_VSISELECT] = { "vsiselect", csrind_or_aia_hmode, NULL, NULL,
6135 rmw_xiselect },
6136 [CSR_VSIREG] = { "vsireg", csrind_or_aia_hmode, NULL, NULL,
6137 rmw_xireg },
6138
6139 /* Virtual Supervisor Indirect Alias */
6140 [CSR_VSIREG2] = { "vsireg2", csrind_hmode, NULL, NULL, rmw_xiregi,
6141 .min_priv_ver = PRIV_VERSION_1_12_0 },
6142 [CSR_VSIREG3] = { "vsireg3", csrind_hmode, NULL, NULL, rmw_xiregi,
6143 .min_priv_ver = PRIV_VERSION_1_12_0 },
6144 [CSR_VSIREG4] = { "vsireg4", csrind_hmode, NULL, NULL, rmw_xiregi,
6145 .min_priv_ver = PRIV_VERSION_1_12_0 },
6146 [CSR_VSIREG5] = { "vsireg5", csrind_hmode, NULL, NULL, rmw_xiregi,
6147 .min_priv_ver = PRIV_VERSION_1_12_0 },
6148 [CSR_VSIREG6] = { "vsireg6", csrind_hmode, NULL, NULL, rmw_xiregi,
6149 .min_priv_ver = PRIV_VERSION_1_12_0 },
6150
6151 /* VS-Level Interrupts (H-extension with AIA) */
6152 [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
6153 [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi },
6154
6155 /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
6156 [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL,
6157 rmw_hidelegh },
6158 [CSR_HVIENH] = { "hvienh", aia_hmode32, NULL, NULL, rmw_hvienh },
6159 [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph },
6160 [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h,
6161 write_hviprio1h },
6162 [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h,
6163 write_hviprio2h },
6164 [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh },
6165 [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph },
6166
6167 /* Physical Memory Protection */
6168 [CSR_MSECCFG] = { "mseccfg", have_mseccfg, read_mseccfg, write_mseccfg,
6169 .min_priv_ver = PRIV_VERSION_1_11_0 },
6170 [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg },
6171 [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg },
6172 [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg },
6173 [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg },
6174 [CSR_PMPCFG4] = { "pmpcfg4", pmp, read_pmpcfg, write_pmpcfg,
6175 .min_priv_ver = PRIV_VERSION_1_12_0 },
6176 [CSR_PMPCFG5] = { "pmpcfg5", pmp, read_pmpcfg, write_pmpcfg,
6177 .min_priv_ver = PRIV_VERSION_1_12_0 },
6178 [CSR_PMPCFG6] = { "pmpcfg6", pmp, read_pmpcfg, write_pmpcfg,
6179 .min_priv_ver = PRIV_VERSION_1_12_0 },
6180 [CSR_PMPCFG7] = { "pmpcfg7", pmp, read_pmpcfg, write_pmpcfg,
6181 .min_priv_ver = PRIV_VERSION_1_12_0 },
6182 [CSR_PMPCFG8] = { "pmpcfg8", pmp, read_pmpcfg, write_pmpcfg,
6183 .min_priv_ver = PRIV_VERSION_1_12_0 },
6184 [CSR_PMPCFG9] = { "pmpcfg9", pmp, read_pmpcfg, write_pmpcfg,
6185 .min_priv_ver = PRIV_VERSION_1_12_0 },
6186 [CSR_PMPCFG10] = { "pmpcfg10", pmp, read_pmpcfg, write_pmpcfg,
6187 .min_priv_ver = PRIV_VERSION_1_12_0 },
6188 [CSR_PMPCFG11] = { "pmpcfg11", pmp, read_pmpcfg, write_pmpcfg,
6189 .min_priv_ver = PRIV_VERSION_1_12_0 },
6190 [CSR_PMPCFG12] = { "pmpcfg12", pmp, read_pmpcfg, write_pmpcfg,
6191 .min_priv_ver = PRIV_VERSION_1_12_0 },
6192 [CSR_PMPCFG13] = { "pmpcfg13", pmp, read_pmpcfg, write_pmpcfg,
6193 .min_priv_ver = PRIV_VERSION_1_12_0 },
6194 [CSR_PMPCFG14] = { "pmpcfg14", pmp, read_pmpcfg, write_pmpcfg,
6195 .min_priv_ver = PRIV_VERSION_1_12_0 },
6196 [CSR_PMPCFG15] = { "pmpcfg15", pmp, read_pmpcfg, write_pmpcfg,
6197 .min_priv_ver = PRIV_VERSION_1_12_0 },
6198 [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr },
6199 [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr },
6200 [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr },
6201 [CSR_PMPADDR3] = { "pmpaddr3", pmp, read_pmpaddr, write_pmpaddr },
6202 [CSR_PMPADDR4] = { "pmpaddr4", pmp, read_pmpaddr, write_pmpaddr },
6203 [CSR_PMPADDR5] = { "pmpaddr5", pmp, read_pmpaddr, write_pmpaddr },
6204 [CSR_PMPADDR6] = { "pmpaddr6", pmp, read_pmpaddr, write_pmpaddr },
6205 [CSR_PMPADDR7] = { "pmpaddr7", pmp, read_pmpaddr, write_pmpaddr },
6206 [CSR_PMPADDR8] = { "pmpaddr8", pmp, read_pmpaddr, write_pmpaddr },
6207 [CSR_PMPADDR9] = { "pmpaddr9", pmp, read_pmpaddr, write_pmpaddr },
6208 [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr },
6209 [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
6210 [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
6211 [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
6212 [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
6213 [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
6214 [CSR_PMPADDR16] = { "pmpaddr16", pmp, read_pmpaddr, write_pmpaddr,
6215 .min_priv_ver = PRIV_VERSION_1_12_0 },
6216 [CSR_PMPADDR17] = { "pmpaddr17", pmp, read_pmpaddr, write_pmpaddr,
6217 .min_priv_ver = PRIV_VERSION_1_12_0 },
6218 [CSR_PMPADDR18] = { "pmpaddr18", pmp, read_pmpaddr, write_pmpaddr,
6219 .min_priv_ver = PRIV_VERSION_1_12_0 },
6220 [CSR_PMPADDR19] = { "pmpaddr19", pmp, read_pmpaddr, write_pmpaddr,
6221 .min_priv_ver = PRIV_VERSION_1_12_0 },
6222 [CSR_PMPADDR20] = { "pmpaddr20", pmp, read_pmpaddr, write_pmpaddr,
6223 .min_priv_ver = PRIV_VERSION_1_12_0 },
6224 [CSR_PMPADDR21] = { "pmpaddr21", pmp, read_pmpaddr, write_pmpaddr,
6225 .min_priv_ver = PRIV_VERSION_1_12_0 },
6226 [CSR_PMPADDR22] = { "pmpaddr22", pmp, read_pmpaddr, write_pmpaddr,
6227 .min_priv_ver = PRIV_VERSION_1_12_0 },
6228 [CSR_PMPADDR23] = { "pmpaddr23", pmp, read_pmpaddr, write_pmpaddr,
6229 .min_priv_ver = PRIV_VERSION_1_12_0 },
6230 [CSR_PMPADDR24] = { "pmpaddr24", pmp, read_pmpaddr, write_pmpaddr,
6231 .min_priv_ver = PRIV_VERSION_1_12_0 },
6232 [CSR_PMPADDR25] = { "pmpaddr25", pmp, read_pmpaddr, write_pmpaddr,
6233 .min_priv_ver = PRIV_VERSION_1_12_0 },
6234 [CSR_PMPADDR26] = { "pmpaddr26", pmp, read_pmpaddr, write_pmpaddr,
6235 .min_priv_ver = PRIV_VERSION_1_12_0 },
6236 [CSR_PMPADDR27] = { "pmpaddr27", pmp, read_pmpaddr, write_pmpaddr,
6237 .min_priv_ver = PRIV_VERSION_1_12_0 },
6238 [CSR_PMPADDR28] = { "pmpaddr28", pmp, read_pmpaddr, write_pmpaddr,
6239 .min_priv_ver = PRIV_VERSION_1_12_0 },
6240 [CSR_PMPADDR29] = { "pmpaddr29", pmp, read_pmpaddr, write_pmpaddr,
6241 .min_priv_ver = PRIV_VERSION_1_12_0 },
6242 [CSR_PMPADDR30] = { "pmpaddr30", pmp, read_pmpaddr, write_pmpaddr,
6243 .min_priv_ver = PRIV_VERSION_1_12_0 },
6244 [CSR_PMPADDR31] = { "pmpaddr31", pmp, read_pmpaddr, write_pmpaddr,
6245 .min_priv_ver = PRIV_VERSION_1_12_0 },
6246 [CSR_PMPADDR32] = { "pmpaddr32", pmp, read_pmpaddr, write_pmpaddr,
6247 .min_priv_ver = PRIV_VERSION_1_12_0 },
6248 [CSR_PMPADDR33] = { "pmpaddr33", pmp, read_pmpaddr, write_pmpaddr,
6249 .min_priv_ver = PRIV_VERSION_1_12_0 },
6250 [CSR_PMPADDR34] = { "pmpaddr34", pmp, read_pmpaddr, write_pmpaddr,
6251 .min_priv_ver = PRIV_VERSION_1_12_0 },
6252 [CSR_PMPADDR35] = { "pmpaddr35", pmp, read_pmpaddr, write_pmpaddr,
6253 .min_priv_ver = PRIV_VERSION_1_12_0 },
6254 [CSR_PMPADDR36] = { "pmpaddr36", pmp, read_pmpaddr, write_pmpaddr,
6255 .min_priv_ver = PRIV_VERSION_1_12_0 },
6256 [CSR_PMPADDR37] = { "pmpaddr37", pmp, read_pmpaddr, write_pmpaddr,
6257 .min_priv_ver = PRIV_VERSION_1_12_0 },
6258 [CSR_PMPADDR38] = { "pmpaddr38", pmp, read_pmpaddr, write_pmpaddr,
6259 .min_priv_ver = PRIV_VERSION_1_12_0 },
6260 [CSR_PMPADDR39] = { "pmpaddr39", pmp, read_pmpaddr, write_pmpaddr,
6261 .min_priv_ver = PRIV_VERSION_1_12_0 },
6262 [CSR_PMPADDR40] = { "pmpaddr40", pmp, read_pmpaddr, write_pmpaddr,
6263 .min_priv_ver = PRIV_VERSION_1_12_0 },
6264 [CSR_PMPADDR41] = { "pmpaddr41", pmp, read_pmpaddr, write_pmpaddr,
6265 .min_priv_ver = PRIV_VERSION_1_12_0 },
6266 [CSR_PMPADDR42] = { "pmpaddr42", pmp, read_pmpaddr, write_pmpaddr,
6267 .min_priv_ver = PRIV_VERSION_1_12_0 },
6268 [CSR_PMPADDR43] = { "pmpaddr43", pmp, read_pmpaddr, write_pmpaddr,
6269 .min_priv_ver = PRIV_VERSION_1_12_0 },
6270 [CSR_PMPADDR44] = { "pmpaddr44", pmp, read_pmpaddr, write_pmpaddr,
6271 .min_priv_ver = PRIV_VERSION_1_12_0 },
6272 [CSR_PMPADDR45] = { "pmpaddr45", pmp, read_pmpaddr, write_pmpaddr,
6273 .min_priv_ver = PRIV_VERSION_1_12_0 },
6274 [CSR_PMPADDR46] = { "pmpaddr46", pmp, read_pmpaddr, write_pmpaddr,
6275 .min_priv_ver = PRIV_VERSION_1_12_0 },
6276 [CSR_PMPADDR47] = { "pmpaddr47", pmp, read_pmpaddr, write_pmpaddr,
6277 .min_priv_ver = PRIV_VERSION_1_12_0 },
6278 [CSR_PMPADDR48] = { "pmpaddr48", pmp, read_pmpaddr, write_pmpaddr,
6279 .min_priv_ver = PRIV_VERSION_1_12_0 },
6280 [CSR_PMPADDR49] = { "pmpaddr49", pmp, read_pmpaddr, write_pmpaddr,
6281 .min_priv_ver = PRIV_VERSION_1_12_0 },
6282 [CSR_PMPADDR50] = { "pmpaddr50", pmp, read_pmpaddr, write_pmpaddr,
6283 .min_priv_ver = PRIV_VERSION_1_12_0 },
6284 [CSR_PMPADDR51] = { "pmpaddr51", pmp, read_pmpaddr, write_pmpaddr,
6285 .min_priv_ver = PRIV_VERSION_1_12_0 },
6286 [CSR_PMPADDR52] = { "pmpaddr52", pmp, read_pmpaddr, write_pmpaddr,
6287 .min_priv_ver = PRIV_VERSION_1_12_0 },
6288 [CSR_PMPADDR53] = { "pmpaddr53", pmp, read_pmpaddr, write_pmpaddr,
6289 .min_priv_ver = PRIV_VERSION_1_12_0 },
6290 [CSR_PMPADDR54] = { "pmpaddr54", pmp, read_pmpaddr, write_pmpaddr,
6291 .min_priv_ver = PRIV_VERSION_1_12_0 },
6292 [CSR_PMPADDR55] = { "pmpaddr55", pmp, read_pmpaddr, write_pmpaddr,
6293 .min_priv_ver = PRIV_VERSION_1_12_0 },
6294 [CSR_PMPADDR56] = { "pmpaddr56", pmp, read_pmpaddr, write_pmpaddr,
6295 .min_priv_ver = PRIV_VERSION_1_12_0 },
6296 [CSR_PMPADDR57] = { "pmpaddr57", pmp, read_pmpaddr, write_pmpaddr,
6297 .min_priv_ver = PRIV_VERSION_1_12_0 },
6298 [CSR_PMPADDR58] = { "pmpaddr58", pmp, read_pmpaddr, write_pmpaddr,
6299 .min_priv_ver = PRIV_VERSION_1_12_0 },
6300 [CSR_PMPADDR59] = { "pmpaddr59", pmp, read_pmpaddr, write_pmpaddr,
6301 .min_priv_ver = PRIV_VERSION_1_12_0 },
6302 [CSR_PMPADDR60] = { "pmpaddr60", pmp, read_pmpaddr, write_pmpaddr,
6303 .min_priv_ver = PRIV_VERSION_1_12_0 },
6304 [CSR_PMPADDR61] = { "pmpaddr61", pmp, read_pmpaddr, write_pmpaddr,
6305 .min_priv_ver = PRIV_VERSION_1_12_0 },
6306 [CSR_PMPADDR62] = { "pmpaddr62", pmp, read_pmpaddr, write_pmpaddr,
6307 .min_priv_ver = PRIV_VERSION_1_12_0 },
6308 [CSR_PMPADDR63] = { "pmpaddr63", pmp, read_pmpaddr, write_pmpaddr,
6309 .min_priv_ver = PRIV_VERSION_1_12_0 },
6310
6311 /* Debug CSRs */
6312 [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect },
6313 [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata },
6314 [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata },
6315 [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata },
6316 [CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },
6317 [CSR_MCONTEXT] = { "mcontext", debug, read_mcontext, write_mcontext },
6318
6319 [CSR_MCTRCTL] = { "mctrctl", ctr_mmode, NULL, NULL, rmw_xctrctl },
6320 [CSR_SCTRCTL] = { "sctrctl", ctr_smode, NULL, NULL, rmw_xctrctl },
6321 [CSR_VSCTRCTL] = { "vsctrctl", ctr_smode, NULL, NULL, rmw_xctrctl },
6322 [CSR_SCTRDEPTH] = { "sctrdepth", ctr_smode, NULL, NULL, rmw_sctrdepth },
6323 [CSR_SCTRSTATUS] = { "sctrstatus", ctr_smode, NULL, NULL, rmw_sctrstatus },
6324
6325 /* Performance Counters */
6326 [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
6327 [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter },
6328 [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_hpmcounter },
6329 [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_hpmcounter },
6330 [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_hpmcounter },
6331 [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_hpmcounter },
6332 [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_hpmcounter },
6333 [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_hpmcounter },
6334 [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_hpmcounter },
6335 [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_hpmcounter },
6336 [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_hpmcounter },
6337 [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_hpmcounter },
6338 [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_hpmcounter },
6339 [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_hpmcounter },
6340 [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_hpmcounter },
6341 [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_hpmcounter },
6342 [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_hpmcounter },
6343 [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_hpmcounter },
6344 [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_hpmcounter },
6345 [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_hpmcounter },
6346 [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_hpmcounter },
6347 [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_hpmcounter },
6348 [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_hpmcounter },
6349 [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_hpmcounter },
6350 [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_hpmcounter },
6351 [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_hpmcounter },
6352 [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_hpmcounter },
6353 [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_hpmcounter },
6354 [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_hpmcounter },
6355
6356 [CSR_MHPMCOUNTER3] = { "mhpmcounter3", mctr, read_hpmcounter,
6357 write_mhpmcounter },
6358 [CSR_MHPMCOUNTER4] = { "mhpmcounter4", mctr, read_hpmcounter,
6359 write_mhpmcounter },
6360 [CSR_MHPMCOUNTER5] = { "mhpmcounter5", mctr, read_hpmcounter,
6361 write_mhpmcounter },
6362 [CSR_MHPMCOUNTER6] = { "mhpmcounter6", mctr, read_hpmcounter,
6363 write_mhpmcounter },
6364 [CSR_MHPMCOUNTER7] = { "mhpmcounter7", mctr, read_hpmcounter,
6365 write_mhpmcounter },
6366 [CSR_MHPMCOUNTER8] = { "mhpmcounter8", mctr, read_hpmcounter,
6367 write_mhpmcounter },
6368 [CSR_MHPMCOUNTER9] = { "mhpmcounter9", mctr, read_hpmcounter,
6369 write_mhpmcounter },
6370 [CSR_MHPMCOUNTER10] = { "mhpmcounter10", mctr, read_hpmcounter,
6371 write_mhpmcounter },
6372 [CSR_MHPMCOUNTER11] = { "mhpmcounter11", mctr, read_hpmcounter,
6373 write_mhpmcounter },
6374 [CSR_MHPMCOUNTER12] = { "mhpmcounter12", mctr, read_hpmcounter,
6375 write_mhpmcounter },
6376 [CSR_MHPMCOUNTER13] = { "mhpmcounter13", mctr, read_hpmcounter,
6377 write_mhpmcounter },
6378 [CSR_MHPMCOUNTER14] = { "mhpmcounter14", mctr, read_hpmcounter,
6379 write_mhpmcounter },
6380 [CSR_MHPMCOUNTER15] = { "mhpmcounter15", mctr, read_hpmcounter,
6381 write_mhpmcounter },
6382 [CSR_MHPMCOUNTER16] = { "mhpmcounter16", mctr, read_hpmcounter,
6383 write_mhpmcounter },
6384 [CSR_MHPMCOUNTER17] = { "mhpmcounter17", mctr, read_hpmcounter,
6385 write_mhpmcounter },
6386 [CSR_MHPMCOUNTER18] = { "mhpmcounter18", mctr, read_hpmcounter,
6387 write_mhpmcounter },
6388 [CSR_MHPMCOUNTER19] = { "mhpmcounter19", mctr, read_hpmcounter,
6389 write_mhpmcounter },
6390 [CSR_MHPMCOUNTER20] = { "mhpmcounter20", mctr, read_hpmcounter,
6391 write_mhpmcounter },
6392 [CSR_MHPMCOUNTER21] = { "mhpmcounter21", mctr, read_hpmcounter,
6393 write_mhpmcounter },
6394 [CSR_MHPMCOUNTER22] = { "mhpmcounter22", mctr, read_hpmcounter,
6395 write_mhpmcounter },
6396 [CSR_MHPMCOUNTER23] = { "mhpmcounter23", mctr, read_hpmcounter,
6397 write_mhpmcounter },
6398 [CSR_MHPMCOUNTER24] = { "mhpmcounter24", mctr, read_hpmcounter,
6399 write_mhpmcounter },
6400 [CSR_MHPMCOUNTER25] = { "mhpmcounter25", mctr, read_hpmcounter,
6401 write_mhpmcounter },
6402 [CSR_MHPMCOUNTER26] = { "mhpmcounter26", mctr, read_hpmcounter,
6403 write_mhpmcounter },
6404 [CSR_MHPMCOUNTER27] = { "mhpmcounter27", mctr, read_hpmcounter,
6405 write_mhpmcounter },
6406 [CSR_MHPMCOUNTER28] = { "mhpmcounter28", mctr, read_hpmcounter,
6407 write_mhpmcounter },
6408 [CSR_MHPMCOUNTER29] = { "mhpmcounter29", mctr, read_hpmcounter,
6409 write_mhpmcounter },
6410 [CSR_MHPMCOUNTER30] = { "mhpmcounter30", mctr, read_hpmcounter,
6411 write_mhpmcounter },
6412 [CSR_MHPMCOUNTER31] = { "mhpmcounter31", mctr, read_hpmcounter,
6413 write_mhpmcounter },
6414
6415 [CSR_MCOUNTINHIBIT] = { "mcountinhibit", any, read_mcountinhibit,
6416 write_mcountinhibit,
6417 .min_priv_ver = PRIV_VERSION_1_11_0 },
6418
6419 [CSR_MCYCLECFG] = { "mcyclecfg", smcntrpmf, read_mcyclecfg,
6420 write_mcyclecfg,
6421 .min_priv_ver = PRIV_VERSION_1_12_0 },
6422 [CSR_MINSTRETCFG] = { "minstretcfg", smcntrpmf, read_minstretcfg,
6423 write_minstretcfg,
6424 .min_priv_ver = PRIV_VERSION_1_12_0 },
6425
6426 [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent,
6427 write_mhpmevent },
6428 [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent,
6429 write_mhpmevent },
6430 [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_mhpmevent,
6431 write_mhpmevent },
6432 [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_mhpmevent,
6433 write_mhpmevent },
6434 [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_mhpmevent,
6435 write_mhpmevent },
6436 [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_mhpmevent,
6437 write_mhpmevent },
6438 [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_mhpmevent,
6439 write_mhpmevent },
6440 [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_mhpmevent,
6441 write_mhpmevent },
6442 [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_mhpmevent,
6443 write_mhpmevent },
6444 [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_mhpmevent,
6445 write_mhpmevent },
6446 [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_mhpmevent,
6447 write_mhpmevent },
6448 [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_mhpmevent,
6449 write_mhpmevent },
6450 [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_mhpmevent,
6451 write_mhpmevent },
6452 [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_mhpmevent,
6453 write_mhpmevent },
6454 [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_mhpmevent,
6455 write_mhpmevent },
6456 [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_mhpmevent,
6457 write_mhpmevent },
6458 [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_mhpmevent,
6459 write_mhpmevent },
6460 [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_mhpmevent,
6461 write_mhpmevent },
6462 [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_mhpmevent,
6463 write_mhpmevent },
6464 [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_mhpmevent,
6465 write_mhpmevent },
6466 [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_mhpmevent,
6467 write_mhpmevent },
6468 [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_mhpmevent,
6469 write_mhpmevent },
6470 [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_mhpmevent,
6471 write_mhpmevent },
6472 [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_mhpmevent,
6473 write_mhpmevent },
6474 [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_mhpmevent,
6475 write_mhpmevent },
6476 [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_mhpmevent,
6477 write_mhpmevent },
6478 [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_mhpmevent,
6479 write_mhpmevent },
6480 [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_mhpmevent,
6481 write_mhpmevent },
6482 [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent,
6483 write_mhpmevent },
6484
6485 [CSR_MCYCLECFGH] = { "mcyclecfgh", smcntrpmf_32, read_mcyclecfgh,
6486 write_mcyclecfgh,
6487 .min_priv_ver = PRIV_VERSION_1_12_0 },
6488 [CSR_MINSTRETCFGH] = { "minstretcfgh", smcntrpmf_32, read_minstretcfgh,
6489 write_minstretcfgh,
6490 .min_priv_ver = PRIV_VERSION_1_12_0 },
6491
6492 [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf_32, read_mhpmeventh,
6493 write_mhpmeventh,
6494 .min_priv_ver = PRIV_VERSION_1_12_0 },
6495 [CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf_32, read_mhpmeventh,
6496 write_mhpmeventh,
6497 .min_priv_ver = PRIV_VERSION_1_12_0 },
6498 [CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf_32, read_mhpmeventh,
6499 write_mhpmeventh,
6500 .min_priv_ver = PRIV_VERSION_1_12_0 },
6501 [CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf_32, read_mhpmeventh,
6502 write_mhpmeventh,
6503 .min_priv_ver = PRIV_VERSION_1_12_0 },
6504 [CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf_32, read_mhpmeventh,
6505 write_mhpmeventh,
6506 .min_priv_ver = PRIV_VERSION_1_12_0 },
6507 [CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf_32, read_mhpmeventh,
6508 write_mhpmeventh,
6509 .min_priv_ver = PRIV_VERSION_1_12_0 },
6510 [CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf_32, read_mhpmeventh,
6511 write_mhpmeventh,
6512 .min_priv_ver = PRIV_VERSION_1_12_0 },
6513 [CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf_32, read_mhpmeventh,
6514 write_mhpmeventh,
6515 .min_priv_ver = PRIV_VERSION_1_12_0 },
/*
 * Upper-half (bits 63:32) mhpmeventN CSRs for the Sscofpmf extension.
 * Gated by the sscofpmf_32 predicate (presumably RV32-only — the "_32"
 * suffix suggests so; confirm against the predicate definition above)
 * and available from privileged spec 1.12 onward.
 */
6516     [CSR_MHPMEVENT11H]   = { "mhpmevent11h",    sscofpmf_32,   read_mhpmeventh,
6517                              write_mhpmeventh,
6518                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6519     [CSR_MHPMEVENT12H]   = { "mhpmevent12h",    sscofpmf_32,   read_mhpmeventh,
6520                              write_mhpmeventh,
6521                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6522     [CSR_MHPMEVENT13H]   = { "mhpmevent13h",    sscofpmf_32,   read_mhpmeventh,
6523                              write_mhpmeventh,
6524                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6525     [CSR_MHPMEVENT14H]   = { "mhpmevent14h",    sscofpmf_32,   read_mhpmeventh,
6526                              write_mhpmeventh,
6527                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6528     [CSR_MHPMEVENT15H]   = { "mhpmevent15h",    sscofpmf_32,   read_mhpmeventh,
6529                              write_mhpmeventh,
6530                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6531     [CSR_MHPMEVENT16H]   = { "mhpmevent16h",    sscofpmf_32,   read_mhpmeventh,
6532                              write_mhpmeventh,
6533                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6534     [CSR_MHPMEVENT17H]   = { "mhpmevent17h",    sscofpmf_32,   read_mhpmeventh,
6535                              write_mhpmeventh,
6536                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6537     [CSR_MHPMEVENT18H]   = { "mhpmevent18h",    sscofpmf_32,   read_mhpmeventh,
6538                              write_mhpmeventh,
6539                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6540     [CSR_MHPMEVENT19H]   = { "mhpmevent19h",    sscofpmf_32,   read_mhpmeventh,
6541                              write_mhpmeventh,
6542                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6543     [CSR_MHPMEVENT20H]   = { "mhpmevent20h",    sscofpmf_32,   read_mhpmeventh,
6544                              write_mhpmeventh,
6545                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6546     [CSR_MHPMEVENT21H]   = { "mhpmevent21h",    sscofpmf_32,   read_mhpmeventh,
6547                              write_mhpmeventh,
6548                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6549     [CSR_MHPMEVENT22H]   = { "mhpmevent22h",    sscofpmf_32,   read_mhpmeventh,
6550                              write_mhpmeventh,
6551                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6552     [CSR_MHPMEVENT23H]   = { "mhpmevent23h",    sscofpmf_32,   read_mhpmeventh,
6553                              write_mhpmeventh,
6554                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6555     [CSR_MHPMEVENT24H]   = { "mhpmevent24h",    sscofpmf_32,   read_mhpmeventh,
6556                              write_mhpmeventh,
6557                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6558     [CSR_MHPMEVENT25H]   = { "mhpmevent25h",    sscofpmf_32,   read_mhpmeventh,
6559                              write_mhpmeventh,
6560                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6561     [CSR_MHPMEVENT26H]   = { "mhpmevent26h",    sscofpmf_32,   read_mhpmeventh,
6562                              write_mhpmeventh,
6563                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6564     [CSR_MHPMEVENT27H]   = { "mhpmevent27h",    sscofpmf_32,   read_mhpmeventh,
6565                              write_mhpmeventh,
6566                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6567     [CSR_MHPMEVENT28H]   = { "mhpmevent28h",    sscofpmf_32,   read_mhpmeventh,
6568                              write_mhpmeventh,
6569                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6570     [CSR_MHPMEVENT29H]   = { "mhpmevent29h",    sscofpmf_32,   read_mhpmeventh,
6571                              write_mhpmeventh,
6572                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6573     [CSR_MHPMEVENT30H]   = { "mhpmevent30h",    sscofpmf_32,   read_mhpmeventh,
6574                              write_mhpmeventh,
6575                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6576     [CSR_MHPMEVENT31H]   = { "mhpmevent31h",    sscofpmf_32,   read_mhpmeventh,
6577                              write_mhpmeventh,
6578                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6579
/*
 * Upper-half hpmcounterNh CSRs: read-only (no write handler is
 * registered), gated by the ctr32 predicate.
 */
6580     [CSR_HPMCOUNTER3H]   = { "hpmcounter3h",    ctr32,  read_hpmcounterh },
6581     [CSR_HPMCOUNTER4H]   = { "hpmcounter4h",    ctr32,  read_hpmcounterh },
6582     [CSR_HPMCOUNTER5H]   = { "hpmcounter5h",    ctr32,  read_hpmcounterh },
6583     [CSR_HPMCOUNTER6H]   = { "hpmcounter6h",    ctr32,  read_hpmcounterh },
6584     [CSR_HPMCOUNTER7H]   = { "hpmcounter7h",    ctr32,  read_hpmcounterh },
6585     [CSR_HPMCOUNTER8H]   = { "hpmcounter8h",    ctr32,  read_hpmcounterh },
6586     [CSR_HPMCOUNTER9H]   = { "hpmcounter9h",    ctr32,  read_hpmcounterh },
6587     [CSR_HPMCOUNTER10H]  = { "hpmcounter10h",   ctr32,  read_hpmcounterh },
6588     [CSR_HPMCOUNTER11H]  = { "hpmcounter11h",   ctr32,  read_hpmcounterh },
6589     [CSR_HPMCOUNTER12H]  = { "hpmcounter12h",   ctr32,  read_hpmcounterh },
6590     [CSR_HPMCOUNTER13H]  = { "hpmcounter13h",   ctr32,  read_hpmcounterh },
6591     [CSR_HPMCOUNTER14H]  = { "hpmcounter14h",   ctr32,  read_hpmcounterh },
6592     [CSR_HPMCOUNTER15H]  = { "hpmcounter15h",   ctr32,  read_hpmcounterh },
6593     [CSR_HPMCOUNTER16H]  = { "hpmcounter16h",   ctr32,  read_hpmcounterh },
6594     [CSR_HPMCOUNTER17H]  = { "hpmcounter17h",   ctr32,  read_hpmcounterh },
6595     [CSR_HPMCOUNTER18H]  = { "hpmcounter18h",   ctr32,  read_hpmcounterh },
6596     [CSR_HPMCOUNTER19H]  = { "hpmcounter19h",   ctr32,  read_hpmcounterh },
6597     [CSR_HPMCOUNTER20H]  = { "hpmcounter20h",   ctr32,  read_hpmcounterh },
6598     [CSR_HPMCOUNTER21H]  = { "hpmcounter21h",   ctr32,  read_hpmcounterh },
6599     [CSR_HPMCOUNTER22H]  = { "hpmcounter22h",   ctr32,  read_hpmcounterh },
6600     [CSR_HPMCOUNTER23H]  = { "hpmcounter23h",   ctr32,  read_hpmcounterh },
6601     [CSR_HPMCOUNTER24H]  = { "hpmcounter24h",   ctr32,  read_hpmcounterh },
6602     [CSR_HPMCOUNTER25H]  = { "hpmcounter25h",   ctr32,  read_hpmcounterh },
6603     [CSR_HPMCOUNTER26H]  = { "hpmcounter26h",   ctr32,  read_hpmcounterh },
6604     [CSR_HPMCOUNTER27H]  = { "hpmcounter27h",   ctr32,  read_hpmcounterh },
6605     [CSR_HPMCOUNTER28H]  = { "hpmcounter28h",   ctr32,  read_hpmcounterh },
6606     [CSR_HPMCOUNTER29H]  = { "hpmcounter29h",   ctr32,  read_hpmcounterh },
6607     [CSR_HPMCOUNTER30H]  = { "hpmcounter30h",   ctr32,  read_hpmcounterh },
6608     [CSR_HPMCOUNTER31H]  = { "hpmcounter31h",   ctr32,  read_hpmcounterh },
6609
/*
 * Upper-half mhpmcounterNh CSRs: read/write (write_mhpmcounterh is
 * registered), gated by the mctr32 predicate.  Same read handler as
 * the hpmcounterNh group above; access policy differs via predicate.
 */
6610     [CSR_MHPMCOUNTER3H]  = { "mhpmcounter3h",   mctr32,  read_hpmcounterh,
6611                              write_mhpmcounterh                         },
6612     [CSR_MHPMCOUNTER4H]  = { "mhpmcounter4h",   mctr32,  read_hpmcounterh,
6613                              write_mhpmcounterh                         },
6614     [CSR_MHPMCOUNTER5H]  = { "mhpmcounter5h",   mctr32,  read_hpmcounterh,
6615                              write_mhpmcounterh                         },
6616     [CSR_MHPMCOUNTER6H]  = { "mhpmcounter6h",   mctr32,  read_hpmcounterh,
6617                              write_mhpmcounterh                         },
6618     [CSR_MHPMCOUNTER7H]  = { "mhpmcounter7h",   mctr32,  read_hpmcounterh,
6619                              write_mhpmcounterh                         },
6620     [CSR_MHPMCOUNTER8H]  = { "mhpmcounter8h",   mctr32,  read_hpmcounterh,
6621                              write_mhpmcounterh                         },
6622     [CSR_MHPMCOUNTER9H]  = { "mhpmcounter9h",   mctr32,  read_hpmcounterh,
6623                              write_mhpmcounterh                         },
6624     [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h",  mctr32,  read_hpmcounterh,
6625                              write_mhpmcounterh                         },
6626     [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h",  mctr32,  read_hpmcounterh,
6627                              write_mhpmcounterh                         },
6628     [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h",  mctr32,  read_hpmcounterh,
6629                              write_mhpmcounterh                         },
6630     [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h",  mctr32,  read_hpmcounterh,
6631                              write_mhpmcounterh                         },
6632     [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h",  mctr32,  read_hpmcounterh,
6633                              write_mhpmcounterh                         },
6634     [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h",  mctr32,  read_hpmcounterh,
6635                              write_mhpmcounterh                         },
6636     [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h",  mctr32,  read_hpmcounterh,
6637                              write_mhpmcounterh                         },
6638     [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h",  mctr32,  read_hpmcounterh,
6639                              write_mhpmcounterh                         },
6640     [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h",  mctr32,  read_hpmcounterh,
6641                              write_mhpmcounterh                         },
6642     [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h",  mctr32,  read_hpmcounterh,
6643                              write_mhpmcounterh                         },
6644     [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h",  mctr32,  read_hpmcounterh,
6645                              write_mhpmcounterh                         },
6646     [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h",  mctr32,  read_hpmcounterh,
6647                              write_mhpmcounterh                         },
6648     [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h",  mctr32,  read_hpmcounterh,
6649                              write_mhpmcounterh                         },
6650     [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h",  mctr32,  read_hpmcounterh,
6651                              write_mhpmcounterh                         },
6652     [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h",  mctr32,  read_hpmcounterh,
6653                              write_mhpmcounterh                         },
6654     [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h",  mctr32,  read_hpmcounterh,
6655                              write_mhpmcounterh                         },
6656     [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h",  mctr32,  read_hpmcounterh,
6657                              write_mhpmcounterh                         },
6658     [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h",  mctr32,  read_hpmcounterh,
6659                              write_mhpmcounterh                         },
6660     [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h",  mctr32,  read_hpmcounterh,
6661                              write_mhpmcounterh                         },
6662     [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h",  mctr32,  read_hpmcounterh,
6663                              write_mhpmcounterh                         },
6664     [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h",  mctr32,  read_hpmcounterh,
6665                              write_mhpmcounterh                         },
6666     [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h",  mctr32,  read_hpmcounterh,
6667                              write_mhpmcounterh                         },
/*
 * scountovf: read-only (no write handler); gated by the sscofpmf
 * predicate and available from privileged spec 1.12 onward.
 */
6668     [CSR_SCOUNTOVF]      = { "scountovf", sscofpmf,  read_scountovf,
6669                              .min_priv_ver = PRIV_VERSION_1_12_0        },
6670
6671 #endif /* !CONFIG_USER_ONLY */
6672 };
6673