/*
 * riscv TCG cpu class initialization
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/translation-block.h"
#include "tcg-cpu.h"
#include "cpu.h"
#include "exec/target_page.h"
#include "internals.h"
#include "pmu.h"
#include "time_helper.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "accel/accel-cpu-target.h"
#include "accel/tcg/cpu-ops.h"
#include "tcg/tcg.h"
#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
#include "system/tcg.h"
#include "exec/icount.h"
#endif

/* Hash tables that store the user-set extensions */
static GHashTable *multi_ext_user_opts;
static GHashTable *misa_ext_user_opts;

static GHashTable *multi_ext_implied_rules;
static GHashTable *misa_ext_implied_rules;

static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
    return g_hash_table_contains(multi_ext_user_opts,
                                 GUINT_TO_POINTER(ext_offset));
}

static bool cpu_misa_ext_is_user_set(uint32_t misa_bit)
{
    return g_hash_table_contains(misa_ext_user_opts,
                                 GUINT_TO_POINTER(misa_bit));
}

static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value)
{
    g_hash_table_insert(multi_ext_user_opts, GUINT_TO_POINTER(ext_offset),
                        (gpointer)value);
}

static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value)
{
    g_hash_table_insert(misa_ext_user_opts, GUINT_TO_POINTER(bit),
                        (gpointer)value);
}

static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit,
                                     bool enabled)
{
    CPURISCVState *env = &cpu->env;

    if (enabled) {
        env->misa_ext |= bit;
        env->misa_ext_mask |= bit;
    } else {
        env->misa_ext &= ~bit;
        env->misa_ext_mask &= ~bit;
    }
}

static const char *cpu_priv_ver_to_str(int priv_ver)
{
    const char *priv_spec_str = priv_spec_to_str(priv_ver);

    g_assert(priv_spec_str);

    return priv_spec_str;
}

static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

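/*
 * Compute the PC and TB flags used to look up or translate a TB for the
 * current CPU state: vector configuration, control-flow integrity state,
 * privilege/MMU index, FP/vector dirty status and pointer-masking
 * settings are all encoded into the flags.
 */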
static TCGTBCPUState riscv_get_tb_cpu_state(CPUState *cs)
{
    CPURISCVState *env = cpu_env(cs);
    RISCVCPU *cpu = env_archcpu(env);
    RISCVExtStatus fs, vs;
    uint32_t flags = 0;
    bool pm_signext = riscv_cpu_virt_mem_enabled(env);

    if (cpu->cfg.ext_zve32x) {
        /*
         * If env->vl equals VLMAX, we can use the generic vector operation
         * expanders (GVEC) to accelerate the vector operations.
         * However, since LMUL can be a fractional number, the maximum
         * vector size that can be operated on might be less than 8 bytes,
         * which is not supported by GVEC. So we set the vl_eq_vlmax flag
         * to true only when maxsz >= 8 bytes.
         */

        /* lmul encoded as in DisasContext::lmul */
        int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
        uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
        uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
        uint32_t maxsz = vlmax << vsew;
        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
                           (maxsz >= 8);
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
        flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                           FIELD_EX64(env->vtype, VTYPE, VLMUL));
        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
        flags = FIELD_DP32(flags, TB_FLAGS, VTA,
                           FIELD_EX64(env->vtype, VTYPE, VTA));
        flags = FIELD_DP32(flags, TB_FLAGS, VMA,
                           FIELD_EX64(env->vtype, VTYPE, VMA));
        flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
    } else {
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
    }

    if (cpu_get_fcfien(env)) {
        /*
         * For forward CFI, only the expectation of an lpad instruction at
         * the start of the block is tracked via env->elp. env->elp
         * is turned on during jalr translation.
         */
        flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp);
        flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1);
    }

    if (cpu_get_bcfien(env)) {
        flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1);
    }

#ifdef CONFIG_USER_ONLY
    fs = EXT_STATUS_DIRTY;
    vs = EXT_STATUS_DIRTY;
#else
    flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);

    flags |= riscv_env_mmu_index(env, 0);
    fs = get_field(env->mstatus, MSTATUS_FS);
    vs = get_field(env->mstatus, MSTATUS_VS);

    if (env->virt_enabled) {
        flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
        /*
         * Merge DISABLED and !DIRTY states using MIN.
         * We will set both fields when dirtying.
         */
        fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
        vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
    }

    /* With Zfinx, floating point is enabled/disabled by Smstateen. */
    if (!riscv_has_ext(env, RVF)) {
        fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
             ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
    }

    if (cpu->cfg.debug && !icount_enabled()) {
        flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
    }
#endif

    flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
    flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
    flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
    flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
    flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env));
    flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext);

    return (TCGTBCPUState){
        .pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc,
        .flags = flags
    };
}

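/*
 * Restore env->pc from the translation block being executed. Only valid
 * when the TB was not translated with CF_PCREL; with PC-relative
 * translation the PC is recovered via restore_state_to_opc() instead.
 */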
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

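/*
 * Roll the CPU state back to a specific instruction inside a TB, using
 * the per-insn data recorded at translation time: data[0] holds the PC
 * (or its in-page offset when CF_PCREL is in use), data[1] is restored
 * into env->bins and data[2] into env->excp_uw2.
 */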
static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
    env->excp_uw2 = data[2];
}

#ifndef CONFIG_USER_ONLY
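/*
 * Apply pointer masking to a computed guest address: truncate to 32 bits
 * in RV32 address mode, otherwise sign- or zero-extend according to the
 * active pointer-masking (PM) length and whether virtual memory accesses
 * are sign-extended.
 */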
static vaddr riscv_pointer_wrap(CPUState *cs, int mmu_idx,
                                vaddr result, vaddr base)
{
    CPURISCVState *env = cpu_env(cs);
    uint32_t pm_len;
    bool pm_signext;

    if (cpu_address_xl(env) == MXL_RV32) {
        return (uint32_t)result;
    }

    pm_len = riscv_pm_get_pmlen(riscv_pm_get_pmm(env));
    if (pm_len == 0) {
        return result;
    }

    pm_signext = riscv_cpu_virt_mem_enabled(env);
    if (pm_signext) {
        return sextract64(result, 0, 64 - pm_len);
    }
    return extract64(result, 0, 64 - pm_len);
}
#endif

const TCGCPUOps riscv_tcg_ops = {
    .mttcg_supported = true,
    .guest_default_memory_order = 0,

    .initialize = riscv_translate_init,
    .translate_code = riscv_translate_code,
    .get_tb_cpu_state = riscv_get_tb_cpu_state,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,
    .mmu_index = riscv_cpu_mmu_index,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .pointer_wrap = riscv_pointer_wrap,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .cpu_exec_halt = riscv_cpu_has_work,
    .cpu_exec_reset = cpu_reset,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

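/*
 * Return the minimum priv spec version required by the extension at
 * 'ext_offset' in RISCVCPUConfig. Aborts if the offset has no entry in
 * isa_edata_arr.
 */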
static int cpu_cfg_ext_get_min_version(uint32_t ext_offset)
{
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (edata->ext_enable_offset != ext_offset) {
            continue;
        }

        return edata->min_version;
    }

    g_assert_not_reached();
}

static const char *cpu_cfg_ext_get_name(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata->name != NULL; edata++) {
        if (edata->ext_enable_offset == ext_offset) {
            return edata->name;
        }
    }

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return feat->name;
        }
    }

    g_assert_not_reached();
}

static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return true;
        }
    }

    return false;
}

static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
{
    /*
     * All other named features are already enabled
     * in riscv_tcg_cpu_instance_init().
     */
    switch (feat_offset) {
    case CPU_CFG_OFFSET(ext_zic64b):
        cpu->cfg.cbom_blocksize = 64;
        cpu->cfg.cbop_blocksize = 64;
        cpu->cfg.cboz_blocksize = 64;
        break;
    case CPU_CFG_OFFSET(ext_sha):
        if (!cpu_misa_ext_is_user_set(RVH)) {
            riscv_cpu_write_misa_bit(cpu, RVH, true);
        }
        /* fallthrough */
    case CPU_CFG_OFFSET(ext_ssstateen):
        cpu->cfg.ext_smstateen = true;
        break;
    }
}

static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env,
                                        uint32_t ext_offset)
{
    int ext_priv_ver;

    if (env->priv_ver == PRIV_VERSION_LATEST) {
        return;
    }

    ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset);

    if (env->priv_ver < ext_priv_ver) {
        /*
         * Note: the 'priv_spec' command line option, if present,
         * will take precedence over this priv_ver bump.
         */
        env->priv_ver = ext_priv_ver;
    }
}

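/*
 * Enable or disable an extension as a side effect of another setting
 * (implied rules, RVG, Zc*, ...). The change is skipped if the user set
 * the extension explicitly on the command line, or if enabling it would
 * conflict with a priv spec older than the extension's minimum version.
 */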
static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
                                    bool value)
{
    CPURISCVState *env = &cpu->env;
    bool prev_val = isa_ext_is_enabled(cpu, ext_offset);
    int min_version;

    if (prev_val == value) {
        return;
    }

    if (cpu_cfg_ext_is_user_set(ext_offset)) {
        return;
    }

    if (value && env->priv_ver != PRIV_VERSION_LATEST) {
        /* Do not enable it if priv_ver is older than min_version */
        min_version = cpu_cfg_ext_get_min_version(ext_offset);
        if (env->priv_ver < min_version) {
            return;
        }
    }

    isa_ext_update_enabled(cpu, ext_offset, value);
}

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

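/*
 * Validate the vector configuration: VLEN must lie between the minimum
 * implied by the enabled vector extension (128 for V, 64 for Zve64x,
 * 32 for Zve32x) and RV_VLEN_MAX, ELEN must be in [8, 64], and VLEN
 * must not be smaller than ELEN.
 */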
static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    uint32_t min_vlen;
    uint32_t vlen = cfg->vlenb << 3;

    if (riscv_has_ext(env, RVV)) {
        min_vlen = 128;
    } else if (cfg->ext_zve64x) {
        min_vlen = 64;
    } else if (cfg->ext_zve32x) {
        min_vlen = 32;
    }

    if (vlen > RV_VLEN_MAX || vlen < min_vlen) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [%d, %d]", min_vlen, RV_VLEN_MAX);
        return;
    }

    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }

    if (vlen < cfg->elen) {
        error_setg(errp, "Vector extension implementation requires VLEN "
                   "to be greater than or equal to ELEN");
        return;
    }
}

static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    const RISCVIsaExtData *edata;

    /* Force disable extensions if priv spec version does not match */
    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) &&
            (env->priv_ver < edata->min_version)) {
            /*
             * These two extensions are always enabled as they were supported
             * by QEMU before they were added as extensions in the ISA.
             */
            if (!strcmp(edata->name, "zicntr") ||
                !strcmp(edata->name, "zihpm")) {
                continue;
            }

            /*
             * cpu->cfg.debug = true is exposed as the 'sdtrig' extension,
             * which requires priv spec 1.12. Skip both the disable and the
             * warning, since existing CPUs with an older priv spec and
             * debug = true would otherwise be impacted.
             */
            if (!strcmp(edata->name, "sdtrig")) {
                continue;
            }

            isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);

            /*
             * Do not show user warnings for named features that users
             * can't enable/disable on the command line. See commit
             * 68c9e54bea for more info.
             */
            if (cpu_cfg_offset_is_named_feat(edata->ext_enable_offset)) {
                continue;
            }
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        edata->name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        edata->name);
#endif
        }
    }
}

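/*
 * Recompute the "named feature" flags that are derived from other
 * settings rather than set directly by the user: priv spec level flags,
 * zic64b (all cache-block sizes equal to 64), ssstateen, sha and
 * ziccrse.
 */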
static void riscv_cpu_update_named_features(RISCVCPU *cpu)
{
    if (cpu->env.priv_ver >= PRIV_VERSION_1_11_0) {
        cpu->cfg.has_priv_1_11 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_12_0) {
        cpu->cfg.has_priv_1_12 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_13_0) {
        cpu->cfg.has_priv_1_13 = true;
    }

    cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
                          cpu->cfg.cbop_blocksize == 64 &&
                          cpu->cfg.cboz_blocksize == 64;

    cpu->cfg.ext_ssstateen = cpu->cfg.ext_smstateen;

    cpu->cfg.ext_sha = riscv_has_ext(&cpu->env, RVH) &&
                       cpu->cfg.ext_ssstateen;

    cpu->cfg.ext_ziccrse = cpu->cfg.has_priv_1_11;
}

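/*
 * RVG is a shorthand for IMAFD + Zicsr + Zifencei. Enable any of those
 * that the user did not touch, and warn about the ones the user
 * explicitly disabled while still requesting G.
 */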
static void riscv_cpu_validate_g(RISCVCPU *cpu)
{
    const char *warn_msg = "RVG mandates disabled extension %s";
    uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD};
    bool send_warn = cpu_misa_ext_is_user_set(RVG);

    for (int i = 0; i < ARRAY_SIZE(g_misa_bits); i++) {
        uint32_t bit = g_misa_bits[i];

        if (riscv_has_ext(&cpu->env, bit)) {
            continue;
        }

        if (!cpu_misa_ext_is_user_set(bit)) {
            riscv_cpu_write_misa_bit(cpu, bit, true);
            continue;
        }

        if (send_warn) {
            warn_report(warn_msg, riscv_get_misa_ext_name(bit));
        }
    }

    if (!cpu->cfg.ext_zicsr) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) {
            cpu->cfg.ext_zicsr = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zicsr");
        }
    }

    if (!cpu->cfg.ext_zifencei) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) {
            cpu->cfg.ext_zifencei = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zifencei");
        }
    }
}

static void riscv_cpu_validate_b(RISCVCPU *cpu)
{
    const char *warn_msg = "RVB mandates disabled extension %s";

    if (!cpu->cfg.ext_zba) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zba))) {
            cpu->cfg.ext_zba = true;
        } else {
            warn_report(warn_msg, "zba");
        }
    }

    if (!cpu->cfg.ext_zbb) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbb))) {
            cpu->cfg.ext_zbb = true;
        } else {
            warn_report(warn_msg, "zbb");
        }
    }

    if (!cpu->cfg.ext_zbs) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbs))) {
            cpu->cfg.ext_zbs = true;
        } else {
            warn_report(warn_msg, "zbs");
        }
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    if (riscv_has_ext(env, RVG)) {
        riscv_cpu_validate_g(cpu);
    }

    if (riscv_has_ext(env, RVB)) {
        riscv_cpu_validate_b(cpu);
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zacas extension requires A extension");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zve32x) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        if (!riscv_has_ext(env, RVD)) {
            error_setg(errp, "Zve64d/V extensions require D extension");
            return;
        }
    }

    /* The Zve32f extension depends on the Zve32x extension */
    if (cpu->cfg.ext_zve32f) {
        if (!riscv_has_ext(env, RVF)) {
            error_setg(errp, "Zve32f/Zve64f extensions require F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extension requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zcmop && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcmop extension requires Zca");
        return;
    }

    if (mcc->def->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
         cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32x) {
        error_setg(errp,
                   "Vector crypto extensions require V or Zve* extensions");
        return;
    }

    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64x) {
        error_setg(
            errp,
            "Zvbc and Zvknhb extensions require V or Zve64x extensions");
        return;
    }

    if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) {
            error_setg(errp, "zicntr requires zicsr");
            return;
        }
        cpu->cfg.ext_zicntr = false;
    }

    if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) {
            error_setg(errp, "zihpm requires zicsr");
            return;
        }
        cpu->cfg.ext_zihpm = false;
    }

    if (cpu->cfg.ext_zicfiss) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "zicfiss extension requires zicsr extension");
            return;
        }
        if (!riscv_has_ext(env, RVA)) {
            error_setg(errp, "zicfiss extension requires A extension");
            return;
        }
        if (!riscv_has_ext(env, RVS)) {
            error_setg(errp, "zicfiss extension requires S");
            return;
        }
        if (!cpu->cfg.ext_zimop) {
            error_setg(errp, "zicfiss extension requires zimop extension");
            return;
        }
        if (cpu->cfg.ext_zca && !cpu->cfg.ext_zcmop) {
            error_setg(errp, "zicfiss with zca requires zcmop extension");
            return;
        }
    }

    if (!cpu->cfg.ext_zihpm) {
        cpu->cfg.pmu_mask = 0;
        cpu->pmu_avail_ctrs = 0;
    }

    if (cpu->cfg.ext_zicfilp && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "zicfilp extension requires zicsr extension");
        return;
    }

    if (mcc->def->misa_mxl_max == MXL_RV32 && cpu->cfg.ext_svukte) {
        error_setg(errp, "svukte is not supported for RV32");
        return;
    }

    if ((cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr) &&
        (!riscv_has_ext(env, RVS) || !cpu->cfg.ext_sscsrind)) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_smctr)) ||
            cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_ssctr))) {
            error_setg(errp, "Smctr and Ssctr require S-mode and Sscsrind");
            return;
        }
        cpu->cfg.ext_smctr = false;
        cpu->cfg.ext_ssctr = false;
    }

    if (cpu->cfg.ext_svrsw60t59b &&
        (!cpu->cfg.mmu || mcc->def->misa_mxl_max == MXL_RV32)) {
        error_setg(errp,
                   "svrsw60t59b is not supported on RV32 or MMU-less platforms");
        return;
    }

    /*
     * Disable ISA extensions based on the priv spec after we have
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

#ifndef CONFIG_USER_ONLY
static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
                                            RISCVCPUProfile *profile,
                                            bool send_warn)
{
    int satp_max = cpu->cfg.max_satp_mode;

    assert(satp_max >= 0);
    if (profile->satp_mode > satp_max) {
        if (send_warn) {
            bool is_32bit = riscv_cpu_is_32bit(cpu);
            const char *req_satp = satp_mode_str(profile->satp_mode, is_32bit);
            const char *cur_satp = satp_mode_str(satp_max, is_32bit);

            warn_report("Profile %s requires satp mode %s, "
                        "but satp mode %s was set", profile->name,
                        req_satp, cur_satp);
        }

        return false;
    }

    return true;
}
#endif

static void riscv_cpu_check_parent_profile(RISCVCPU *cpu,
                                           RISCVCPUProfile *profile,
                                           RISCVCPUProfile *parent)
{
    if (!profile->present || !parent) {
        return;
    }

    profile->present = parent->present;
}

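/*
 * Check whether every requirement of a profile (satp mode, priv spec,
 * MISA bits and multi-letter extensions) is satisfied by the current
 * configuration, warn about mismatches if the user enabled the profile
 * explicitly, and record the result in profile->present.
 */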
static void riscv_cpu_validate_profile(RISCVCPU *cpu,
                                       RISCVCPUProfile *profile)
{
    CPURISCVState *env = &cpu->env;
    const char *warn_msg = "Profile %s mandates disabled extension %s";
    bool send_warn = profile->user_set && profile->enabled;
    bool profile_impl = true;
    int i;

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        profile_impl = riscv_cpu_validate_profile_satp(cpu, profile,
                                                       send_warn);
    }
#endif

    if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
        profile->priv_spec > env->priv_ver) {
        profile_impl = false;

        if (send_warn) {
            warn_report("Profile %s requires priv spec %s, "
                        "but priv ver %s was set", profile->name,
                        cpu_priv_ver_to_str(profile->priv_spec),
                        cpu_priv_ver_to_str(env->priv_ver));
        }
    }

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (!riscv_has_ext(&cpu->env, bit)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            riscv_get_misa_ext_name(bit));
            }
        }
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        int ext_offset = profile->ext_offsets[i];

        if (!isa_ext_is_enabled(cpu, ext_offset)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            cpu_cfg_ext_get_name(ext_offset));
            }
        }
    }

    profile->present = profile_impl;

    riscv_cpu_check_parent_profile(cpu, profile, profile->u_parent);
    riscv_cpu_check_parent_profile(cpu, profile, profile->s_parent);
}

static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        riscv_cpu_validate_profile(cpu, riscv_profiles[i]);
    }
}

static void riscv_cpu_init_implied_exts_rules(void)
{
    RISCVCPUImpliedExtsRule *rule;
#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
#endif
    static bool initialized;
    int i;

    /* Implied rules only need to be initialized once. */
    if (initialized) {
        return;
    }

    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(misa_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(multi_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    initialized = true;
}

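/*
 * Recursively apply one implied-extension rule: enable the MISA bits and
 * multi-letter extensions the rule implies (honouring explicit user
 * disables), then follow any rules attached to the newly enabled
 * extensions. A per-hart bitmap prevents applying the same rule twice
 * in system-mode emulation.
 */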
static void cpu_enable_implied_rule(RISCVCPU *cpu,
                                    RISCVCPUImpliedExtsRule *rule)
{
    CPURISCVState *env = &cpu->env;
    RISCVCPUImpliedExtsRule *ir;
    bool enabled = false;
    int i;

#ifndef CONFIG_USER_ONLY
    enabled = test_bit(cpu->env.mhartid, rule->enabled);
#endif

    if (!enabled) {
        /* Enable the implied MISAs. */
        if (rule->implied_misa_exts) {
            for (i = 0; misa_bits[i] != 0; i++) {
                if (rule->implied_misa_exts & misa_bits[i]) {
                    /*
                     * If the user disabled the misa_bit do not re-enable it
                     * and do not apply any implied rules related to it.
                     */
                    if (cpu_misa_ext_is_user_set(misa_bits[i]) &&
                        !(env->misa_ext & misa_bits[i])) {
                        continue;
                    }

                    riscv_cpu_set_misa_ext(env, env->misa_ext | misa_bits[i]);
                    ir = g_hash_table_lookup(misa_ext_implied_rules,
                                             GUINT_TO_POINTER(misa_bits[i]));

                    if (ir) {
                        cpu_enable_implied_rule(cpu, ir);
                    }
                }
            }
        }

        /* Enable the implied extensions. */
        for (i = 0;
             rule->implied_multi_exts[i] != RISCV_IMPLIED_EXTS_RULE_END; i++) {
            cpu_cfg_ext_auto_update(cpu, rule->implied_multi_exts[i], true);

            ir = g_hash_table_lookup(multi_ext_implied_rules,
                                     GUINT_TO_POINTER(
                                         rule->implied_multi_exts[i]));

            if (ir) {
                cpu_enable_implied_rule(cpu, ir);
            }
        }

#ifndef CONFIG_USER_ONLY
        bitmap_set(rule->enabled, cpu->env.mhartid, 1);
#endif
    }
}

/* Zc extension has special implied rules that need to be handled separately. */
static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (cpu->cfg.ext_zce) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);

        if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
    }

    /* Zca, Zcd and Zcf have a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);

        if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }

        if (riscv_has_ext(env, RVD)) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
        }
    }
}

static void riscv_cpu_enable_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUImpliedExtsRule *rule;
    int i;

    /* Enable the implied extensions for Zc. */
    cpu_enable_zc_implied_rules(cpu);

    /* Enable the implied MISAs. */
    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
        if (riscv_has_ext(&cpu->env, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }

    /* Enable the implied extensions. */
    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
        if (isa_ext_is_enabled(cpu, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }
}

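/*
 * Final validation step for TCG: apply implied-extension rules, check
 * MISA/priv-spec consistency, resolve named features and profiles,
 * validate the extension set and, in system mode, set up the PMU and
 * its overflow timer when configured.
 */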
void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    riscv_cpu_init_implied_exts_rules();
    riscv_cpu_enable_implied_rules(cpu);

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_update_named_features(cpu);
    riscv_cpu_validate_profiles(cpu);

    if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: Smepmp requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.pmu_mask) {
        riscv_pmu_init(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        if (cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }
#endif
}

void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
{
    GPtrArray *dynamic_decoders;
    dynamic_decoders = g_ptr_array_sized_new(decoder_table_size);
    for (size_t i = 0; i < decoder_table_size; ++i) {
        if (decoder_table[i].guard_func &&
            decoder_table[i].guard_func(&cpu->cfg)) {
            g_ptr_array_add(dynamic_decoders,
                            (gpointer)decoder_table[i].riscv_cpu_decode_fn);
        }
    }

    cpu->decoders = dynamic_decoders;
}

bool riscv_cpu_tcg_compatible(RISCVCPU *cpu)
{
    return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL;
}

static bool riscv_cpu_is_generic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

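/*
 * Enable or disable a profile and, recursively, its parent profiles.
 * Enabling a profile sets the priv spec, satp mode, MISA bits and
 * multi-letter extensions it mandates; disabling it turns its mandatory
 * extensions off again, except for the base ISA (RVI).
 */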
static void riscv_cpu_set_profile(RISCVCPU *cpu,
                                  RISCVCPUProfile *profile,
                                  bool enabled)
{
    int i, ext_offset;

    if (profile->u_parent != NULL) {
        riscv_cpu_set_profile(cpu, profile->u_parent, enabled);
    }

    if (profile->s_parent != NULL) {
        riscv_cpu_set_profile(cpu, profile->s_parent, enabled);
    }

    profile->enabled = enabled;

    if (profile->enabled) {
        cpu->env.priv_ver = profile->priv_spec;

#ifndef CONFIG_USER_ONLY
        if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
            object_property_set_bool(OBJECT(cpu), "mmu", true, NULL);
            const char *satp_prop = satp_mode_str(profile->satp_mode,
                                                  riscv_cpu_is_32bit(cpu));
            object_property_set_bool(OBJECT(cpu), satp_prop, true, NULL);
        }
#endif
    }

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (bit == RVI && !profile->enabled) {
            /*
             * Disabling profiles will not disable the base
             * ISA RV64I.
             */
            continue;
        }

        cpu_misa_ext_add_user_opt(bit, profile->enabled);
        riscv_cpu_write_misa_bit(cpu, bit, profile->enabled);
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        ext_offset = profile->ext_offsets[i];

        if (profile->enabled) {
            if (cpu_cfg_offset_is_named_feat(ext_offset)) {
                riscv_cpu_enable_named_feat(cpu, ext_offset);
            }

            cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset);
        }

        cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled);
        isa_ext_update_enabled(cpu, ext_offset, profile->enabled);
    }
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> tcg_cpu_realize() (via accel_cpu_common_realize())
 */
static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (!riscv_cpu_tcg_compatible(cpu)) {
        g_autofree char *name = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU is not compatible with TCG acceleration",
                   name);
        return false;
    }

#ifndef CONFIG_USER_ONLY
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);

    if (mcc->def->misa_mxl_max >= MXL_RV128 && qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_setg(errp,
                   "128-bit RISC-V currently does not work with Multi "
                   "Threaded TCG. Please use: -accel tcg,thread=single");
        return false;
    }

    CPURISCVState *env = &cpu->env;

    tcg_cflags_set(CPU(cs), CF_PCREL);

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
    }
#endif

    return true;
}

typedef struct RISCVCPUMisaExtConfig {
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;

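/*
 * QOM setter for the single-letter (MISA) extension properties. Records
 * the option as user-set, rejects attempts to enable extra extensions on
 * vendor CPUs, bumps the priv spec when enabling H, and finally updates
 * misa_ext/misa_ext_mask.
 */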
static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    cpu_misa_ext_add_user_opt(misa_bit, value);

    prev_val = env->misa_ext & misa_bit;

    if (value == prev_val) {
        return;
    }

    if (value) {
        if (vendor_cpu) {
            g_autofree char *cpuname = riscv_cpu_get_name(cpu);
            error_setg(errp, "'%s' CPU does not allow enabling extensions",
                       cpuname);
            return;
        }

        if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) {
            /*
             * Note: the 'priv_spec' command line option, if present,
             * will take precedence over this priv_ver bump.
             */
            env->priv_ver = PRIV_VERSION_1_12_0;
        }
    }

    riscv_cpu_write_misa_bit(cpu, misa_bit, value);
}

static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
    MISA_CFG(RVB, false),
};

/*
 * We do not support user choice tracking for MISA
 * extensions yet because, so far, we do not silently
 * change MISA bits during realize() (RVG enables MISA
 * bits but the user is warned about it).
 */
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    bool use_def_vals = riscv_cpu_is_generic(cpu_obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
        int bit = misa_cfg->misa_bit;
        const char *name = riscv_get_misa_ext_name(bit);
        const char *desc = riscv_get_misa_ext_description(bit);

        /* Check if KVM already created the property */
        if (object_property_find(cpu_obj, name)) {
            continue;
        }

        object_property_add(cpu_obj, name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, name, desc);
        if (use_def_vals) {
            riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit,
                                     misa_cfg->enabled);
        }
    }
}

static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;

    if (riscv_cpu_is_vendor(obj)) {
        error_setg(errp, "Profile %s is not available for vendor CPUs",
                   profile->name);
        return;
    }

    if (cpu->env.misa_mxl != MXL_RV64) {
        error_setg(errp, "Profile %s only available for 64 bit CPUs",
                   profile->name);
        return;
    }

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    profile->user_set = true;

    riscv_cpu_set_profile(cpu, profile, value);
}

static void cpu_get_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    bool value = profile->enabled;

    visit_type_bool(v, name, &value, errp);
}

static void riscv_cpu_add_profiles(Object *cpu_obj)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        RISCVCPUProfile *profile = riscv_profiles[i];

        object_property_add(cpu_obj, profile->name, "bool",
                            cpu_get_profile, cpu_set_profile,
                            NULL, (void *)profile);

        /*
         * CPUs might enable a profile right from the start.
         * Enable its mandatory extensions right away in this
         * case.
         */
        if (profile->enabled) {
            riscv_cpu_set_profile(RISCV_CPU(cpu_obj), profile, true);
        }
    }
}

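/*
 * QOM setter for the multi-letter extension properties. Mirrors
 * cpu_set_misa_ext_cfg(): the choice is recorded in the user-options
 * hash, vendor CPUs may not enable extra extensions, and enabling an
 * extension may bump the priv spec to its minimum required version.
 */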
static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value);

    prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset);

    if (value == prev_val) {
        return;
    }

    if (value && vendor_cpu) {
        g_autofree char *cpuname = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU does not allow enabling extensions",
                   cpuname);
        return;
    }

    if (value) {
        cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset);
    }

    isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value);
}

static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_add_multi_ext_prop(Object *cpu_obj,
                                   const RISCVCPUMultiExtConfig *multi_cfg)
{
    bool generic_cpu = riscv_cpu_is_generic(cpu_obj);

    object_property_add(cpu_obj, multi_cfg->name, "bool",
                        cpu_get_multi_ext_cfg,
                        cpu_set_multi_ext_cfg,
                        NULL, (void *)multi_cfg);

    if (!generic_cpu) {
        return;
    }

    /*
     * Set the default value directly instead of using
     * object_property_set_bool(), so the set() callback does not
     * record it as a user-set option in the hash.
     */
    isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset,
                           multi_cfg->enabled);
}

static void riscv_cpu_add_multiext_prop_array(Object *obj,
                                              const RISCVCPUMultiExtConfig *array)
{
    const RISCVCPUMultiExtConfig *prop;

    g_assert(array);

    for (prop = array; prop && prop->name; prop++) {
        cpu_add_multi_ext_prop(obj, prop);
    }
}

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);
#endif

    riscv_cpu_add_misa_properties(obj);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts);

    riscv_cpu_add_profiles(obj);
}

/*
 * The 'max' type CPU will have all possible ratified
 * non-vendor extensions enabled.
 */
static void riscv_init_max_cpu_extensions(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    const RISCVCPUMultiExtConfig *prop;

    /* Enable RVB, RVG and RVV, which are disabled by default */
    riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVV);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
        isa_ext_update_enabled(cpu, prop->offset, true);
    }

    /*
     * Some extensions can't be added without backward compatibility
     * concerns. Disable those; the user can still opt in to them on
     * the command line.
     */
    cpu->cfg.ext_svade = false;

    /* Set the vector extension version */
    env->vext_ver = VEXT_VERSION_1_00_0;

    /* Zfinx is not compatible with F; disable it and its dependants */
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false);

    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false);

    if (env->misa_mxl != MXL_RV32) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
    } else {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_svrsw60t59b), false);
    }

    /*
     * TODO: ext_smrnmi requires OpenSBI changes that our current
     * image does not have. Disable it for now.
     */
    if (cpu->cfg.ext_smrnmi) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smrnmi), false);
    }

    /*
     * TODO: ext_smdbltrp requires the firmware to clear MSTATUS.MDT on
     * startup to avoid generating a double trap. OpenSBI does not
     * currently support it, so disable it for now.
     */
    if (cpu->cfg.ext_smdbltrp) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smdbltrp), false);
    }
}

static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL;
}

static void riscv_tcg_cpu_instance_init(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    Object *obj = OBJECT(cpu);

    misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
    multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);

    if (!misa_ext_implied_rules) {
        misa_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
    }

    if (!multi_ext_implied_rules) {
        multi_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
    }

    riscv_cpu_add_user_properties(obj);

    if (riscv_cpu_has_max_extensions(obj)) {
        riscv_init_max_cpu_extensions(obj);
    }
}

static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, const void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_instance_init = riscv_tcg_cpu_instance_init;
    acc->cpu_target_realize = riscv_tcg_cpu_realize;
}

static const TypeInfo riscv_tcg_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("tcg"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = riscv_tcg_cpu_accel_class_init,
    .abstract = true,
};

static void riscv_tcg_cpu_accel_register_types(void)
{
    type_register_static(&riscv_tcg_cpu_accel_type_info);
}
type_init(riscv_tcg_cpu_accel_register_types);