/*
 * riscv TCG cpu class initialization
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "tcg-cpu.h"
#include "cpu.h"
#include "internals.h"
#include "pmu.h"
#include "time_helper.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "hw/core/accel-cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tcg/tcg.h"
#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
#endif

/* Hash that stores user set extensions */
static GHashTable *multi_ext_user_opts;
static GHashTable *misa_ext_user_opts;

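/* Hashes that store the implied extension rules, keyed by the implying ext */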
static GHashTable *multi_ext_implied_rules;
static GHashTable *misa_ext_implied_rules;

static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
    return g_hash_table_contains(multi_ext_user_opts,
                                 GUINT_TO_POINTER(ext_offset));
}

static bool cpu_misa_ext_is_user_set(uint32_t misa_bit)
{
    return g_hash_table_contains(misa_ext_user_opts,
                                 GUINT_TO_POINTER(misa_bit));
}

static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value)
{
    g_hash_table_insert(multi_ext_user_opts, GUINT_TO_POINTER(ext_offset),
                        (gpointer)value);
}

static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value)
{
    g_hash_table_insert(misa_ext_user_opts, GUINT_TO_POINTER(bit),
                        (gpointer)value);
}

static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit,
                                     bool enabled)
{
    CPURISCVState *env = &cpu->env;

    if (enabled) {
        env->misa_ext |= bit;
        env->misa_ext_mask |= bit;
    } else {
        env->misa_ext &= ~bit;
        env->misa_ext_mask &= ~bit;
    }
}

static const char *cpu_priv_ver_to_str(int priv_ver)
{
    const char *priv_spec_str = priv_spec_to_str(priv_ver);

    g_assert(priv_spec_str);

    return priv_spec_str;
}

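/*
 * For non-PCREL translation blocks, restore env->pc from tb->pc,
 * sign-extending it for RV32 harts. PCREL blocks need no sync here.
 */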
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
    env->excp_uw2 = data[2];
}

static const TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .cpu_exec_halt = riscv_cpu_has_work,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

static int cpu_cfg_ext_get_min_version(uint32_t ext_offset)
{
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (edata->ext_enable_offset != ext_offset) {
            continue;
        }

        return edata->min_version;
    }

    g_assert_not_reached();
}

static const char *cpu_cfg_ext_get_name(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata->name != NULL; edata++) {
        if (edata->ext_enable_offset == ext_offset) {
            return edata->name;
        }
    }

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return feat->name;
        }
    }

    g_assert_not_reached();
}

static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return true;
        }
    }

    return false;
}

static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
{
    /*
     * All other named features are already enabled
     * in riscv_tcg_cpu_instance_init().
     */
    if (feat_offset == CPU_CFG_OFFSET(ext_zic64b)) {
        cpu->cfg.cbom_blocksize = 64;
        cpu->cfg.cbop_blocksize = 64;
        cpu->cfg.cboz_blocksize = 64;
    }
}

static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env,
                                        uint32_t ext_offset)
{
    int ext_priv_ver;

    if (env->priv_ver == PRIV_VERSION_LATEST) {
        return;
    }

    ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset);

    if (env->priv_ver < ext_priv_ver) {
        /*
         * Note: the 'priv_spec' command line option, if present,
         * will take precedence over this priv_ver bump.
         */
        env->priv_ver = ext_priv_ver;
    }
}

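/*
 * Update a multi-letter extension, but only if the user did not set it
 * explicitly; when enabling, the priv spec version must also be new
 * enough for the extension.
 */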
static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
                                    bool value)
{
    CPURISCVState *env = &cpu->env;
    bool prev_val = isa_ext_is_enabled(cpu, ext_offset);
    int min_version;

    if (prev_val == value) {
        return;
    }

    if (cpu_cfg_ext_is_user_set(ext_offset)) {
        return;
    }

    if (value && env->priv_ver != PRIV_VERSION_LATEST) {
        /* Do not enable it if priv_ver is older than min_version */
        min_version = cpu_cfg_ext_get_min_version(ext_offset);
        if (env->priv_ver < min_version) {
            return;
        }
    }

    isa_ext_update_enabled(cpu, ext_offset, value);
}

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    uint32_t vlen = cfg->vlenb << 3;

    if (vlen > RV_VLEN_MAX || vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }

    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
}

static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    const RISCVIsaExtData *edata;

    /* Force disable extensions if priv spec version does not match */
    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) &&
            (env->priv_ver < edata->min_version)) {
            /*
             * These two extensions are always enabled as they were supported
             * by QEMU before they were added as extensions in the ISA.
             */
            if (!strcmp(edata->name, "zicntr") ||
                !strcmp(edata->name, "zihpm")) {
                continue;
            }

            isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        edata->name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        edata->name);
#endif
        }
    }
}

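/*
 * Resolve the 'named features' (priv version flags, zic64b) from the
 * current configuration.
 */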
static void riscv_cpu_update_named_features(RISCVCPU *cpu)
{
    if (cpu->env.priv_ver >= PRIV_VERSION_1_11_0) {
        cpu->cfg.has_priv_1_11 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_12_0) {
        cpu->cfg.has_priv_1_12 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_13_0) {
        cpu->cfg.has_priv_1_13 = true;
    }

    /* zic64b is 1.12 or later */
    cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
                          cpu->cfg.cbop_blocksize == 64 &&
                          cpu->cfg.cboz_blocksize == 64 &&
                          cpu->cfg.has_priv_1_12;
}

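/*
 * RVG mandates IMAFD plus Zicsr and Zifencei. Enable whatever the user
 * did not set explicitly, and warn about anything they disabled.
 */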
static void riscv_cpu_validate_g(RISCVCPU *cpu)
{
    const char *warn_msg = "RVG mandates disabled extension %s";
    uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD};
    bool send_warn = cpu_misa_ext_is_user_set(RVG);

    for (int i = 0; i < ARRAY_SIZE(g_misa_bits); i++) {
        uint32_t bit = g_misa_bits[i];

        if (riscv_has_ext(&cpu->env, bit)) {
            continue;
        }

        if (!cpu_misa_ext_is_user_set(bit)) {
            riscv_cpu_write_misa_bit(cpu, bit, true);
            continue;
        }

        if (send_warn) {
            warn_report(warn_msg, riscv_get_misa_ext_name(bit));
        }
    }

    if (!cpu->cfg.ext_zicsr) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) {
            cpu->cfg.ext_zicsr = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zicsr");
        }
    }

    if (!cpu->cfg.ext_zifencei) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) {
            cpu->cfg.ext_zifencei = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zifencei");
        }
    }
}

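/*
 * RVB mandates Zba, Zbb and Zbs. Enable whatever the user did not set
 * explicitly, and warn about anything they disabled.
 */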
static void riscv_cpu_validate_b(RISCVCPU *cpu)
{
    const char *warn_msg = "RVB mandates disabled extension %s";

    if (!cpu->cfg.ext_zba) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zba))) {
            cpu->cfg.ext_zba = true;
        } else {
            warn_report(warn_msg, "zba");
        }
    }

    if (!cpu->cfg.ext_zbb) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbb))) {
            cpu->cfg.ext_zbb = true;
        } else {
            warn_report(warn_msg, "zbb");
        }
    }

    if (!cpu->cfg.ext_zbs) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbs))) {
            cpu->cfg.ext_zbs = true;
        } else {
            warn_report(warn_msg, "zbs");
        }
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    if (riscv_has_ext(env, RVG)) {
        riscv_cpu_validate_g(cpu);
    }

    if (riscv_has_ext(env, RVB)) {
        riscv_cpu_validate_b(cpu);
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zacas extension requires A extension");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        if (!riscv_has_ext(env, RVD)) {
            error_setg(errp, "Zve64d/V extensions require D extension");
            return;
        }
    }

    /* The Zve32f extension depends on the Zve32x extension */
    if (cpu->cfg.ext_zve32f) {
        if (!riscv_has_ext(env, RVF)) {
            error_setg(errp, "Zve32f/Zve64f extensions require F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extension requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zcmop && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcmop extensions require Zca");
        return;
    }

    if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
         cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32x) {
        error_setg(errp,
                   "Vector crypto extensions require V or Zve* extensions");
        return;
    }

    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64x) {
        error_setg(
            errp,
            "Zvbc and Zvknhb extensions require V or Zve64x extensions");
        return;
    }

    if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) {
            error_setg(errp, "zicntr requires zicsr");
            return;
        }
        cpu->cfg.ext_zicntr = false;
    }

    if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) {
            error_setg(errp, "zihpm requires zicsr");
            return;
        }
        cpu->cfg.ext_zihpm = false;
    }

    if (cpu->cfg.ext_zicfiss) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "zicfiss extension requires zicsr extension");
            return;
        }
        if (!riscv_has_ext(env, RVA)) {
            error_setg(errp, "zicfiss extension requires A extension");
            return;
        }
        if (!riscv_has_ext(env, RVS)) {
            error_setg(errp, "zicfiss extension requires S");
            return;
        }
        if (!cpu->cfg.ext_zimop) {
            error_setg(errp, "zicfiss extension requires zimop extension");
            return;
        }
        if (cpu->cfg.ext_zca && !cpu->cfg.ext_zcmop) {
            error_setg(errp, "zicfiss with zca requires zcmop extension");
            return;
        }
    }

    if (!cpu->cfg.ext_zihpm) {
        cpu->cfg.pmu_mask = 0;
        cpu->pmu_avail_ctrs = 0;
    }

    if (cpu->cfg.ext_zicfilp && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "zicfilp extension requires zicsr extension");
        return;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

#ifndef CONFIG_USER_ONLY
static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
                                            RISCVCPUProfile *profile,
                                            bool send_warn)
{
    int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (profile->satp_mode > satp_max) {
        if (send_warn) {
            bool is_32bit = riscv_cpu_is_32bit(cpu);
            const char *req_satp = satp_mode_str(profile->satp_mode, is_32bit);
            const char *cur_satp = satp_mode_str(satp_max, is_32bit);

            warn_report("Profile %s requires satp mode %s, "
                        "but satp mode %s was set", profile->name,
                        req_satp, cur_satp);
        }

        return false;
    }

    return true;
}
#endif

static void riscv_cpu_validate_profile(RISCVCPU *cpu,
                                       RISCVCPUProfile *profile)
{
    CPURISCVState *env = &cpu->env;
    const char *warn_msg = "Profile %s mandates disabled extension %s";
    bool send_warn = profile->user_set && profile->enabled;
    bool parent_enabled, profile_impl = true;
    int i;

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        profile_impl = riscv_cpu_validate_profile_satp(cpu, profile,
                                                       send_warn);
    }
#endif

    if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
        profile->priv_spec != env->priv_ver) {
        profile_impl = false;

        if (send_warn) {
            warn_report("Profile %s requires priv spec %s, "
                        "but priv ver %s was set", profile->name,
                        cpu_priv_ver_to_str(profile->priv_spec),
                        cpu_priv_ver_to_str(env->priv_ver));
        }
    }

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (!riscv_has_ext(&cpu->env, bit)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            riscv_get_misa_ext_name(bit));
            }
        }
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        int ext_offset = profile->ext_offsets[i];

        if (!isa_ext_is_enabled(cpu, ext_offset)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            cpu_cfg_ext_get_name(ext_offset));
            }
        }
    }

    profile->enabled = profile_impl;

    if (profile->parent != NULL) {
        parent_enabled = object_property_get_bool(OBJECT(cpu),
                                                  profile->parent->name,
                                                  NULL);
        profile->enabled = profile->enabled && parent_enabled;
    }
}

static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        riscv_cpu_validate_profile(cpu, riscv_profiles[i]);
    }
}

static void riscv_cpu_init_implied_exts_rules(void)
{
    RISCVCPUImpliedExtsRule *rule;
#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
#endif
    static bool initialized;
    int i;

    /* Implied rules only need to be initialized once. */
    if (initialized) {
        return;
    }

    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(misa_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(multi_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    initialized = true;
}

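/*
 * Recursively enable everything implied by 'rule', skipping MISA bits that
 * the user explicitly disabled. In system mode each hart records the rules
 * it has already applied so they are not processed twice.
 */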
static void cpu_enable_implied_rule(RISCVCPU *cpu,
                                    RISCVCPUImpliedExtsRule *rule)
{
    CPURISCVState *env = &cpu->env;
    RISCVCPUImpliedExtsRule *ir;
    bool enabled = false;
    int i;

#ifndef CONFIG_USER_ONLY
    enabled = test_bit(cpu->env.mhartid, rule->enabled);
#endif

    if (!enabled) {
        /* Enable the implied MISAs. */
        if (rule->implied_misa_exts) {
            for (i = 0; misa_bits[i] != 0; i++) {
                if (rule->implied_misa_exts & misa_bits[i]) {
                    /*
                     * If the user disabled the misa_bit do not re-enable it
                     * and do not apply any implied rules related to it.
                     */
                    if (cpu_misa_ext_is_user_set(misa_bits[i]) &&
                        !(env->misa_ext & misa_bits[i])) {
                        continue;
                    }

                    riscv_cpu_set_misa_ext(env, env->misa_ext | misa_bits[i]);
                    ir = g_hash_table_lookup(misa_ext_implied_rules,
                                             GUINT_TO_POINTER(misa_bits[i]));

                    if (ir) {
                        cpu_enable_implied_rule(cpu, ir);
                    }
                }
            }
        }

        /* Enable the implied extensions. */
        for (i = 0;
             rule->implied_multi_exts[i] != RISCV_IMPLIED_EXTS_RULE_END; i++) {
            cpu_cfg_ext_auto_update(cpu, rule->implied_multi_exts[i], true);

            ir = g_hash_table_lookup(multi_ext_implied_rules,
                                     GUINT_TO_POINTER(
                                         rule->implied_multi_exts[i]));

            if (ir) {
                cpu_enable_implied_rule(cpu, ir);
            }
        }

#ifndef CONFIG_USER_ONLY
        bitmap_set(rule->enabled, cpu->env.mhartid, 1);
#endif
    }
}

/* Zc extension has special implied rules that need to be handled separately. */
static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (cpu->cfg.ext_zce) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);

        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
    }

    /* Zca, Zcd and Zcf have a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);

        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }

        if (riscv_has_ext(env, RVD)) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
        }
    }
}

static void riscv_cpu_enable_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUImpliedExtsRule *rule;
    int i;

    /* Enable the implied extensions for Zc. */
    cpu_enable_zc_implied_rules(cpu);

    /* Enable the implied MISAs. */
    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
        if (riscv_has_ext(&cpu->env, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }

    /* Enable the implied extensions. */
    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
        if (isa_ext_is_enabled(cpu, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }
}

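/*
 * TCG-specific finalization: apply the implied extension rules, check
 * MISA/priv spec consistency, resolve named features and profiles, and
 * then validate the resulting extension set.
 */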
void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    riscv_cpu_init_implied_exts_rules();
    riscv_cpu_enable_implied_rules(cpu);

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_update_named_features(cpu);
    riscv_cpu_validate_profiles(cpu);

    if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: Smepmp requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

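/*
 * Build the list of decoder functions whose guard function accepts the
 * finalized CPU configuration.
 */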
void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
{
    GPtrArray *dynamic_decoders;
    dynamic_decoders = g_ptr_array_sized_new(decoder_table_size);
    for (size_t i = 0; i < decoder_table_size; ++i) {
        if (decoder_table[i].guard_func &&
            decoder_table[i].guard_func(&cpu->cfg)) {
            g_ptr_array_add(dynamic_decoders,
                            (gpointer)decoder_table[i].riscv_cpu_decode_fn);
        }
    }

    cpu->decoders = dynamic_decoders;
}

bool riscv_cpu_tcg_compatible(RISCVCPU *cpu)
{
    return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL;
}

static bool riscv_cpu_is_generic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> tcg_cpu_realize() (via accel_cpu_common_realize())
 */
static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (!riscv_cpu_tcg_compatible(cpu)) {
        g_autofree char *name = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU is not compatible with TCG acceleration",
                   name);
        return false;
    }

#ifndef CONFIG_USER_ONLY
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    tcg_cflags_set(CPU(cs), CF_PCREL);

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    if (cpu->cfg.pmu_mask) {
        riscv_pmu_init(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return false;
        }

        if (cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }

    /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
    }
#endif

    return true;
}

typedef struct RISCVCPUMisaExtConfig {
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;

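/*
 * Setter for the boolean MISA extension properties. Vendor CPUs only allow
 * disabling bits; enabling RVH may also bump the priv spec to 1.12.0.
 */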
static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    cpu_misa_ext_add_user_opt(misa_bit, value);

    prev_val = env->misa_ext & misa_bit;

    if (value == prev_val) {
        return;
    }

    if (value) {
        if (vendor_cpu) {
            g_autofree char *cpuname = riscv_cpu_get_name(cpu);
            error_setg(errp, "'%s' CPU does not allow enabling extensions",
                       cpuname);
            return;
        }

        if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) {
            /*
             * Note: the 'priv_spec' command line option, if present,
             * will take precedence over this priv_ver bump.
             */
            env->priv_ver = PRIV_VERSION_1_12_0;
        }
    }

    riscv_cpu_write_misa_bit(cpu, misa_bit, value);
}

static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVJ, false),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
    MISA_CFG(RVB, false),
};

/*
 * We do not support user choice tracking for MISA
 * extensions yet because, so far, we do not silently
 * change MISA bits during realize() (RVG enables MISA
 * bits but the user is warned about it).
 */
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    bool use_def_vals = riscv_cpu_is_generic(cpu_obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
        int bit = misa_cfg->misa_bit;
        const char *name = riscv_get_misa_ext_name(bit);
        const char *desc = riscv_get_misa_ext_description(bit);

        /* Check if KVM already created the property */
        if (object_property_find(cpu_obj, name)) {
            continue;
        }

        object_property_add(cpu_obj, name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, name, desc);
        if (use_def_vals) {
            riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit,
                                     misa_cfg->enabled);
        }
    }
}

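/*
 * Setter for the profile properties (e.g. RVA22U64). Enabling a profile
 * turns on its mandatory MISA bits and extensions; disabling it turns
 * them off, except for the base ISA.
 */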
static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;
    int i, ext_offset;

    if (riscv_cpu_is_vendor(obj)) {
        error_setg(errp, "Profile %s is not available for vendor CPUs",
                   profile->name);
        return;
    }

    if (cpu->env.misa_mxl != MXL_RV64) {
        error_setg(errp, "Profile %s only available for 64 bit CPUs",
                   profile->name);
        return;
    }

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    profile->user_set = true;
    profile->enabled = value;

    if (profile->parent != NULL) {
        object_property_set_bool(obj, profile->parent->name,
                                 profile->enabled, NULL);
    }

    if (profile->enabled) {
        cpu->env.priv_ver = profile->priv_spec;
    }

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        object_property_set_bool(obj, "mmu", true, NULL);
        const char *satp_prop = satp_mode_str(profile->satp_mode,
                                              riscv_cpu_is_32bit(cpu));
        object_property_set_bool(obj, satp_prop, profile->enabled, NULL);
    }
#endif

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (bit == RVI && !profile->enabled) {
            /*
             * Disabling profiles will not disable the base
             * ISA RV64I.
             */
            continue;
        }

        cpu_misa_ext_add_user_opt(bit, profile->enabled);
        riscv_cpu_write_misa_bit(cpu, bit, profile->enabled);
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        ext_offset = profile->ext_offsets[i];

        if (profile->enabled) {
            if (cpu_cfg_offset_is_named_feat(ext_offset)) {
                riscv_cpu_enable_named_feat(cpu, ext_offset);
            }

            cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset);
        }

        cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled);
        isa_ext_update_enabled(cpu, ext_offset, profile->enabled);
    }
}

static void cpu_get_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    bool value = profile->enabled;

    visit_type_bool(v, name, &value, errp);
}

static void riscv_cpu_add_profiles(Object *cpu_obj)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        const RISCVCPUProfile *profile = riscv_profiles[i];

        object_property_add(cpu_obj, profile->name, "bool",
                            cpu_get_profile, cpu_set_profile,
                            NULL, (void *)profile);

        /*
         * CPUs might enable a profile right from the start.
         * Enable its mandatory extensions right away in this
         * case.
         */
        if (profile->enabled) {
            object_property_set_bool(cpu_obj, profile->name, true, NULL);
        }
    }
}

static bool cpu_ext_is_deprecated(const char *ext_name)
{
    return isupper(ext_name[0]);
}

/*
 * String will be allocated in the heap. Caller is responsible
 * for freeing it.
 */
static char *cpu_ext_to_lower(const char *ext_name)
{
    char *ret = g_malloc0(strlen(ext_name) + 1);

    strcpy(ret, ext_name);
    ret[0] = tolower(ret[0]);

    return ret;
}

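/*
 * Setter for the boolean multi-letter extension properties. Vendor CPUs
 * only allow disabling extensions; enabling one may bump the priv spec.
 */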
static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (cpu_ext_is_deprecated(multi_ext_cfg->name)) {
        g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name);

        warn_report("CPU property '%s' is deprecated. Please use '%s' instead",
                    multi_ext_cfg->name, lower);
    }

    cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value);

    prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset);

    if (value == prev_val) {
        return;
    }

    if (value && vendor_cpu) {
        g_autofree char *cpuname = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU does not allow enabling extensions",
                   cpuname);
        return;
    }

    if (value) {
        cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset);
    }

    isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value);
}

static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_add_multi_ext_prop(Object *cpu_obj,
                                   const RISCVCPUMultiExtConfig *multi_cfg)
{
    bool generic_cpu = riscv_cpu_is_generic(cpu_obj);
    bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name);

    object_property_add(cpu_obj, multi_cfg->name, "bool",
                        cpu_get_multi_ext_cfg,
                        cpu_set_multi_ext_cfg,
                        NULL, (void *)multi_cfg);

    if (!generic_cpu || deprecated_ext) {
        return;
    }

    /*
     * Set def val directly instead of using
     * object_property_set_bool() to save the set()
     * callback hash for user inputs.
     */
    isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset,
                           multi_cfg->enabled);
}

static void riscv_cpu_add_multiext_prop_array(Object *obj,
                                        const RISCVCPUMultiExtConfig *array)
{
    const RISCVCPUMultiExtConfig *prop;

    g_assert(array);

    for (prop = array; prop && prop->name; prop++) {
        cpu_add_multi_ext_prop(obj, prop);
    }
}

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);
#endif

    riscv_cpu_add_misa_properties(obj);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts);

    riscv_cpu_add_profiles(obj);
}

/*
 * The 'max' type CPU will have all possible ratified
 * non-vendor extensions enabled.
 */
static void riscv_init_max_cpu_extensions(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    const RISCVCPUMultiExtConfig *prop;

    /* Enable RVB, RVG, RVJ and RVV that are disabled by default */
    riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVJ | RVV);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
        isa_ext_update_enabled(cpu, prop->offset, true);
    }

    /*
     * Some extensions can't be added without backward compatibility concerns.
     * Disable those, the user can still opt in to them on the command line.
     */
    cpu->cfg.ext_svade = false;

    /* set vector version */
    env->vext_ver = VEXT_VERSION_1_00_0;

    /* Zfinx is not compatible with F. Disable it */
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false);

    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false);

    if (env->misa_mxl != MXL_RV32) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
    }
}

static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL;
}

static void riscv_tcg_cpu_instance_init(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    Object *obj = OBJECT(cpu);

    misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
    multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);

    if (!misa_ext_implied_rules) {
        misa_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
    }

    if (!multi_ext_implied_rules) {
        multi_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
    }

    riscv_cpu_add_user_properties(obj);

    if (riscv_cpu_has_max_extensions(obj)) {
        riscv_init_max_cpu_extensions(obj);
    }
}

static void riscv_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
{
    /*
     * All cpus use the same set of operations.
     */
    cc->tcg_ops = &riscv_tcg_ops;
}

static void riscv_tcg_cpu_class_init(CPUClass *cc)
{
    cc->init_accel_cpu = riscv_tcg_cpu_init_ops;
}

static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_class_init = riscv_tcg_cpu_class_init;
    acc->cpu_instance_init = riscv_tcg_cpu_instance_init;
    acc->cpu_target_realize = riscv_tcg_cpu_realize;
}

static const TypeInfo riscv_tcg_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("tcg"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = riscv_tcg_cpu_accel_class_init,
    .abstract = true,
};

static void riscv_tcg_cpu_accel_register_types(void)
{
    type_register_static(&riscv_tcg_cpu_accel_type_info);
}
type_init(riscv_tcg_cpu_accel_register_types);