/*
 * riscv TCG cpu class initialization
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "tcg-cpu.h"
#include "cpu.h"
#include "internals.h"
#include "pmu.h"
#include "time_helper.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "hw/core/accel-cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tcg/tcg.h"
#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
#endif

/* Hashes that store user-set extensions */
static GHashTable *multi_ext_user_opts;
static GHashTable *misa_ext_user_opts;

static GHashTable *multi_ext_implied_rules;
static GHashTable *misa_ext_implied_rules;

static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
    return g_hash_table_contains(multi_ext_user_opts,
                                 GUINT_TO_POINTER(ext_offset));
}

static bool cpu_misa_ext_is_user_set(uint32_t misa_bit)
{
    return g_hash_table_contains(misa_ext_user_opts,
                                 GUINT_TO_POINTER(misa_bit));
}

static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value)
{
    g_hash_table_insert(multi_ext_user_opts, GUINT_TO_POINTER(ext_offset),
                        (gpointer)value);
}

static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value)
{
    g_hash_table_insert(misa_ext_user_opts, GUINT_TO_POINTER(bit),
                        (gpointer)value);
}

static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit,
                                     bool enabled)
{
    CPURISCVState *env = &cpu->env;

    if (enabled) {
        env->misa_ext |= bit;
        env->misa_ext_mask |= bit;
    } else {
        env->misa_ext &= ~bit;
        env->misa_ext_mask &= ~bit;
    }
}

static const char *cpu_priv_ver_to_str(int priv_ver)
{
    const char *priv_spec_str = priv_spec_to_str(priv_ver);

    g_assert(priv_spec_str);

    return priv_spec_str;
}

static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
}
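
/*
 * TCG hooks shared by every RISC-V CPU model. They are installed on the
 * CPU class by riscv_tcg_cpu_init_ops() at the end of this file.
 */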
static const TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .cpu_exec_halt = riscv_cpu_has_work,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

static int cpu_cfg_ext_get_min_version(uint32_t ext_offset)
{
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (edata->ext_enable_offset != ext_offset) {
            continue;
        }

        return edata->min_version;
    }

    g_assert_not_reached();
}

static const char *cpu_cfg_ext_get_name(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata->name != NULL; edata++) {
        if (edata->ext_enable_offset == ext_offset) {
            return edata->name;
        }
    }

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return feat->name;
        }
    }

    g_assert_not_reached();
}

static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return true;
        }
    }

    return false;
}

static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
{
    /*
     * All other named features are already enabled
     * in riscv_tcg_cpu_instance_init().
     */
    if (feat_offset == CPU_CFG_OFFSET(ext_zic64b)) {
        cpu->cfg.cbom_blocksize = 64;
        cpu->cfg.cbop_blocksize = 64;
        cpu->cfg.cboz_blocksize = 64;
    }
}

static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env,
                                        uint32_t ext_offset)
{
    int ext_priv_ver;

    if (env->priv_ver == PRIV_VERSION_LATEST) {
        return;
    }

    ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset);

    if (env->priv_ver < ext_priv_ver) {
        /*
         * Note: the 'priv_spec' command line option, if present,
         * will take precedence over this priv_ver bump.
         */
        env->priv_ver = ext_priv_ver;
    }
}
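
/*
 * Flip an extension on or off only when the user did not set it explicitly
 * on the command line, and, when enabling, only if the current priv spec
 * version is recent enough for the extension.
 */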
static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
                                    bool value)
{
    CPURISCVState *env = &cpu->env;
    bool prev_val = isa_ext_is_enabled(cpu, ext_offset);
    int min_version;

    if (prev_val == value) {
        return;
    }

    if (cpu_cfg_ext_is_user_set(ext_offset)) {
        return;
    }

    if (value && env->priv_ver != PRIV_VERSION_LATEST) {
        /* Do not enable it if priv_ver is older than min_version */
        min_version = cpu_cfg_ext_get_min_version(ext_offset);
        if (env->priv_ver < min_version) {
            return;
        }
    }

    isa_ext_update_enabled(cpu, ext_offset, value);
}

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    uint32_t vlen = cfg->vlenb << 3;

    if (vlen > RV_VLEN_MAX || vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }

    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
}

static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    const RISCVIsaExtData *edata;

    /* Force disable extensions if priv spec version does not match */
    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) &&
            (env->priv_ver < edata->min_version)) {
            /*
             * These two extensions are always enabled as they were supported
             * by QEMU before they were added as extensions in the ISA.
             */
            if (!strcmp(edata->name, "zicntr") ||
                !strcmp(edata->name, "zihpm")) {
                continue;
            }

            isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        edata->name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        edata->name);
#endif
        }
    }
}

static void riscv_cpu_update_named_features(RISCVCPU *cpu)
{
    if (cpu->env.priv_ver >= PRIV_VERSION_1_11_0) {
        cpu->cfg.has_priv_1_11 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_12_0) {
        cpu->cfg.has_priv_1_12 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_13_0) {
        cpu->cfg.has_priv_1_13 = true;
    }

    /* zic64b is 1.12 or later */
    cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
                          cpu->cfg.cbop_blocksize == 64 &&
                          cpu->cfg.cboz_blocksize == 64 &&
                          cpu->cfg.has_priv_1_12;
}

static void riscv_cpu_validate_g(RISCVCPU *cpu)
{
    const char *warn_msg = "RVG mandates disabled extension %s";
    uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD};
    bool send_warn = cpu_misa_ext_is_user_set(RVG);

    for (int i = 0; i < ARRAY_SIZE(g_misa_bits); i++) {
        uint32_t bit = g_misa_bits[i];

        if (riscv_has_ext(&cpu->env, bit)) {
            continue;
        }

        if (!cpu_misa_ext_is_user_set(bit)) {
            riscv_cpu_write_misa_bit(cpu, bit, true);
            continue;
        }

        if (send_warn) {
            warn_report(warn_msg, riscv_get_misa_ext_name(bit));
        }
    }

    if (!cpu->cfg.ext_zicsr) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) {
            cpu->cfg.ext_zicsr = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zicsr");
        }
    }

    if (!cpu->cfg.ext_zifencei) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) {
            cpu->cfg.ext_zifencei = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zifencei");
        }
    }
}

static void riscv_cpu_validate_b(RISCVCPU *cpu)
{
    const char *warn_msg = "RVB mandates disabled extension %s";

    if (!cpu->cfg.ext_zba) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zba))) {
            cpu->cfg.ext_zba = true;
        } else {
            warn_report(warn_msg, "zba");
        }
    }

    if (!cpu->cfg.ext_zbb) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbb))) {
            cpu->cfg.ext_zbb = true;
        } else {
            warn_report(warn_msg, "zbb");
        }
    }

    if (!cpu->cfg.ext_zbs) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbs))) {
            cpu->cfg.ext_zbs = true;
        } else {
            warn_report(warn_msg, "zbs");
        }
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    if (riscv_has_ext(env, RVG)) {
        riscv_cpu_validate_g(cpu);
    }

    if (riscv_has_ext(env, RVB)) {
        riscv_cpu_validate_b(cpu);
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zacas extension requires A extension");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        if (!riscv_has_ext(env, RVD)) {
            error_setg(errp, "Zve64d/V extensions require D extension");
            return;
        }
    }

    /* The Zve32f extension depends on the Zve32x extension */
    if (cpu->cfg.ext_zve32f) {
        if (!riscv_has_ext(env, RVF)) {
            error_setg(errp, "Zve32f/Zve64f extensions require F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extension requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zcmop && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcmop extension requires Zca");
        return;
    }

    if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
         cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32x) {
        error_setg(errp,
                   "Vector crypto extensions require V or Zve* extensions");
        return;
    }

    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64x) {
        error_setg(
            errp,
            "Zvbc and Zvknhb extensions require V or Zve64x extensions");
        return;
    }

    if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) {
            error_setg(errp, "zicntr requires zicsr");
            return;
        }
        cpu->cfg.ext_zicntr = false;
    }

    if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) {
            error_setg(errp, "zihpm requires zicsr");
            return;
        }
        cpu->cfg.ext_zihpm = false;
    }

    if (!cpu->cfg.ext_zihpm) {
        cpu->cfg.pmu_mask = 0;
        cpu->pmu_avail_ctrs = 0;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

#ifndef CONFIG_USER_ONLY
static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
                                            RISCVCPUProfile *profile,
                                            bool send_warn)
{
    int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (profile->satp_mode > satp_max) {
        if (send_warn) {
            bool is_32bit = riscv_cpu_is_32bit(cpu);
            const char *req_satp = satp_mode_str(profile->satp_mode, is_32bit);
            const char *cur_satp = satp_mode_str(satp_max, is_32bit);

            warn_report("Profile %s requires satp mode %s, "
                        "but satp mode %s was set", profile->name,
                        req_satp, cur_satp);
        }

        return false;
    }

    return true;
}
#endif

static void riscv_cpu_validate_profile(RISCVCPU *cpu,
                                       RISCVCPUProfile *profile)
{
    CPURISCVState *env = &cpu->env;
    const char *warn_msg = "Profile %s mandates disabled extension %s";
    bool send_warn = profile->user_set && profile->enabled;
    bool parent_enabled, profile_impl = true;
    int i;

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        profile_impl = riscv_cpu_validate_profile_satp(cpu, profile,
                                                       send_warn);
    }
#endif

    if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
        profile->priv_spec != env->priv_ver) {
        profile_impl = false;

        if (send_warn) {
            warn_report("Profile %s requires priv spec %s, "
                        "but priv ver %s was set", profile->name,
                        cpu_priv_ver_to_str(profile->priv_spec),
                        cpu_priv_ver_to_str(env->priv_ver));
        }
    }

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (!riscv_has_ext(&cpu->env, bit)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            riscv_get_misa_ext_name(bit));
            }
        }
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        int ext_offset = profile->ext_offsets[i];

        if (!isa_ext_is_enabled(cpu, ext_offset)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            cpu_cfg_ext_get_name(ext_offset));
            }
        }
    }

    profile->enabled = profile_impl;

    if (profile->parent != NULL) {
        parent_enabled = object_property_get_bool(OBJECT(cpu),
                                                  profile->parent->name,
                                                  NULL);
        profile->enabled = profile->enabled && parent_enabled;
    }
}

static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        riscv_cpu_validate_profile(cpu, riscv_profiles[i]);
    }
}
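
/*
 * Implied extension rules: enabling an extension (either a MISA bit or a
 * multi-letter extension) may imply other extensions. The rules are stored
 * in hash tables keyed by the implying extension and are applied
 * recursively by cpu_enable_implied_rule() below.
 */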
static void riscv_cpu_init_implied_exts_rules(void)
{
    RISCVCPUImpliedExtsRule *rule;
#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
#endif
    static bool initialized;
    int i;

    /* Implied rules only need to be initialized once. */
    if (initialized) {
        return;
    }

    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(misa_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(multi_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    initialized = true;
}

static void cpu_enable_implied_rule(RISCVCPU *cpu,
                                    RISCVCPUImpliedExtsRule *rule)
{
    CPURISCVState *env = &cpu->env;
    RISCVCPUImpliedExtsRule *ir;
    bool enabled = false;
    int i;

#ifndef CONFIG_USER_ONLY
    enabled = test_bit(cpu->env.mhartid, rule->enabled);
#endif

    if (!enabled) {
        /* Enable the implied MISAs. */
        if (rule->implied_misa_exts) {
            for (i = 0; misa_bits[i] != 0; i++) {
                if (rule->implied_misa_exts & misa_bits[i]) {
                    /*
                     * If the user disabled the misa_bit do not re-enable it
                     * and do not apply any implied rules related to it.
                     */
                    if (cpu_misa_ext_is_user_set(misa_bits[i]) &&
                        !(env->misa_ext & misa_bits[i])) {
                        continue;
                    }

                    riscv_cpu_set_misa_ext(env, env->misa_ext | misa_bits[i]);
                    ir = g_hash_table_lookup(misa_ext_implied_rules,
                                             GUINT_TO_POINTER(misa_bits[i]));

                    if (ir) {
                        cpu_enable_implied_rule(cpu, ir);
                    }
                }
            }
        }

        /* Enable the implied extensions. */
        for (i = 0;
             rule->implied_multi_exts[i] != RISCV_IMPLIED_EXTS_RULE_END; i++) {
            cpu_cfg_ext_auto_update(cpu, rule->implied_multi_exts[i], true);

            ir = g_hash_table_lookup(multi_ext_implied_rules,
                                     GUINT_TO_POINTER(
                                         rule->implied_multi_exts[i]));

            if (ir) {
                cpu_enable_implied_rule(cpu, ir);
            }
        }

#ifndef CONFIG_USER_ONLY
        bitmap_set(rule->enabled, cpu->env.mhartid, 1);
#endif
    }
}

/* Zc extension has special implied rules that need to be handled separately. */
static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (cpu->cfg.ext_zce) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);

        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
    }

    /* Zca, Zcd and Zcf have a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);

        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }

        if (riscv_has_ext(env, RVD)) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
        }
    }
}
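
/*
 * Entry point: apply the Zc special rules first, then every implied rule
 * whose triggering extension is currently enabled.
 */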
static void riscv_cpu_enable_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUImpliedExtsRule *rule;
    int i;

    /* Enable the implied extensions for Zc. */
    cpu_enable_zc_implied_rules(cpu);

    /* Enable the implied MISAs. */
    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
        if (riscv_has_ext(&cpu->env, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }

    /* Enable the implied extensions. */
    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
        if (isa_ext_is_enabled(cpu, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }
}

void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    riscv_cpu_init_implied_exts_rules();
    riscv_cpu_enable_implied_rules(cpu);

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_update_named_features(cpu);
    riscv_cpu_validate_profiles(cpu);

    if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: Smepmp requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
{
    GPtrArray *dynamic_decoders;
    dynamic_decoders = g_ptr_array_sized_new(decoder_table_size);
    for (size_t i = 0; i < decoder_table_size; ++i) {
        if (decoder_table[i].guard_func &&
            decoder_table[i].guard_func(&cpu->cfg)) {
            g_ptr_array_add(dynamic_decoders,
                            (gpointer)decoder_table[i].riscv_cpu_decode_fn);
        }
    }

    cpu->decoders = dynamic_decoders;
}

bool riscv_cpu_tcg_compatible(RISCVCPU *cpu)
{
    return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL;
}

static bool riscv_cpu_is_generic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> tcg_cpu_realize() (via accel_cpu_common_realize())
 */
static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (!riscv_cpu_tcg_compatible(cpu)) {
        g_autofree char *name = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU is not compatible with TCG acceleration",
                   name);
        return false;
    }

#ifndef CONFIG_USER_ONLY
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    tcg_cflags_set(CPU(cs), CF_PCREL);

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    if (cpu->cfg.pmu_mask) {
        riscv_pmu_init(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return false;
        }

        if (cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }

    /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
    }
#endif

    return true;
}

typedef struct RISCVCPUMisaExtConfig {
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;
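
/*
 * QOM property setter for a single MISA bit, exposed on the CPU object
 * under the extension's one-letter name (e.g. something like
 * "-cpu rv64,v=true" on the command line). Vendor CPUs refuse to enable
 * extensions they do not ship.
 */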
static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    cpu_misa_ext_add_user_opt(misa_bit, value);

    prev_val = env->misa_ext & misa_bit;

    if (value == prev_val) {
        return;
    }

    if (value) {
        if (vendor_cpu) {
            g_autofree char *cpuname = riscv_cpu_get_name(cpu);
            error_setg(errp, "'%s' CPU does not allow enabling extensions",
                       cpuname);
            return;
        }

        if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) {
            /*
             * Note: the 'priv_spec' command line option, if present,
             * will take precedence over this priv_ver bump.
             */
            env->priv_ver = PRIV_VERSION_1_12_0;
        }
    }

    riscv_cpu_write_misa_bit(cpu, misa_bit, value);
}

static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVJ, false),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
    MISA_CFG(RVB, false),
};

/*
 * We do not support user choice tracking for MISA
 * extensions yet because, so far, we do not silently
 * change MISA bits during realize() (RVG enables MISA
 * bits but the user is warned about it).
 */
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    bool use_def_vals = riscv_cpu_is_generic(cpu_obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
        int bit = misa_cfg->misa_bit;
        const char *name = riscv_get_misa_ext_name(bit);
        const char *desc = riscv_get_misa_ext_description(bit);

        /* Check if KVM already created the property */
        if (object_property_find(cpu_obj, name)) {
            continue;
        }

        object_property_add(cpu_obj, name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, name, desc);
        if (use_def_vals) {
            riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit,
                                     misa_cfg->enabled);
        }
    }
}
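
/*
 * QOM property setter for an ISA profile (e.g. "-cpu rv64,rva22u64=true").
 * Enabling a profile also enables its parent profile, its mandatory MISA
 * bits and multi-letter extensions, and selects the priv spec version the
 * profile requires.
 */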
static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;
    int i, ext_offset;

    if (riscv_cpu_is_vendor(obj)) {
        error_setg(errp, "Profile %s is not available for vendor CPUs",
                   profile->name);
        return;
    }

    if (cpu->env.misa_mxl != MXL_RV64) {
        error_setg(errp, "Profile %s is only available for 64-bit CPUs",
                   profile->name);
        return;
    }

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    profile->user_set = true;
    profile->enabled = value;

    if (profile->parent != NULL) {
        object_property_set_bool(obj, profile->parent->name,
                                 profile->enabled, NULL);
    }

    if (profile->enabled) {
        cpu->env.priv_ver = profile->priv_spec;
    }

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        object_property_set_bool(obj, "mmu", true, NULL);
        const char *satp_prop = satp_mode_str(profile->satp_mode,
                                              riscv_cpu_is_32bit(cpu));
        object_property_set_bool(obj, satp_prop, profile->enabled, NULL);
    }
#endif

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (bit == RVI && !profile->enabled) {
            /*
             * Disabling profiles will not disable the base
             * ISA RV64I.
             */
            continue;
        }

        cpu_misa_ext_add_user_opt(bit, profile->enabled);
        riscv_cpu_write_misa_bit(cpu, bit, profile->enabled);
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        ext_offset = profile->ext_offsets[i];

        if (profile->enabled) {
            if (cpu_cfg_offset_is_named_feat(ext_offset)) {
                riscv_cpu_enable_named_feat(cpu, ext_offset);
            }

            cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset);
        }

        cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled);
        isa_ext_update_enabled(cpu, ext_offset, profile->enabled);
    }
}

static void cpu_get_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    bool value = profile->enabled;

    visit_type_bool(v, name, &value, errp);
}

static void riscv_cpu_add_profiles(Object *cpu_obj)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        const RISCVCPUProfile *profile = riscv_profiles[i];

        object_property_add(cpu_obj, profile->name, "bool",
                            cpu_get_profile, cpu_set_profile,
                            NULL, (void *)profile);

        /*
         * CPUs might enable a profile right from the start.
         * Enable its mandatory extensions right away in this
         * case.
         */
        if (profile->enabled) {
            object_property_set_bool(cpu_obj, profile->name, true, NULL);
        }
    }
}

static bool cpu_ext_is_deprecated(const char *ext_name)
{
    return isupper(ext_name[0]);
}

/*
 * The returned string is allocated on the heap. The caller is
 * responsible for freeing it.
 */
static char *cpu_ext_to_lower(const char *ext_name)
{
    char *ret = g_malloc0(strlen(ext_name) + 1);

    strcpy(ret, ext_name);
    ret[0] = tolower(ret[0]);

    return ret;
}
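
/*
 * QOM property setter shared by all multi-letter extension flags (e.g.
 * "zba", "zicsr"). Deprecated capitalized spellings are still accepted,
 * with a warning pointing at the lower-case replacement.
 */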
static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (cpu_ext_is_deprecated(multi_ext_cfg->name)) {
        g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name);

        warn_report("CPU property '%s' is deprecated. Please use '%s' instead",
                    multi_ext_cfg->name, lower);
    }

    cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value);

    prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset);

    if (value == prev_val) {
        return;
    }

    if (value && vendor_cpu) {
        g_autofree char *cpuname = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU does not allow enabling extensions",
                   cpuname);
        return;
    }

    if (value) {
        cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset);
    }

    isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value);
}

static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_add_multi_ext_prop(Object *cpu_obj,
                                   const RISCVCPUMultiExtConfig *multi_cfg)
{
    bool generic_cpu = riscv_cpu_is_generic(cpu_obj);
    bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name);

    object_property_add(cpu_obj, multi_cfg->name, "bool",
                        cpu_get_multi_ext_cfg,
                        cpu_set_multi_ext_cfg,
                        NULL, (void *)multi_cfg);

    if (!generic_cpu || deprecated_ext) {
        return;
    }

    /*
     * Set the default value directly instead of using
     * object_property_set_bool(), so that the set() callback does not
     * record it in the user-opts hash as if it were user input.
     */
    isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset,
                           multi_cfg->enabled);
}

static void riscv_cpu_add_multiext_prop_array(Object *obj,
                                        const RISCVCPUMultiExtConfig *array)
{
    const RISCVCPUMultiExtConfig *prop;

    g_assert(array);

    for (prop = array; prop && prop->name; prop++) {
        cpu_add_multi_ext_prop(obj, prop);
    }
}

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);
#endif

    riscv_cpu_add_misa_properties(obj);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts);

    riscv_cpu_add_profiles(obj);
}

/*
 * The 'max' type CPU will have all possible ratified
 * non-vendor extensions enabled.
 */
static void riscv_init_max_cpu_extensions(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    const RISCVCPUMultiExtConfig *prop;

    /* Enable RVB, RVG, RVJ and RVV, which are disabled by default */
    riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVJ | RVV);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
        isa_ext_update_enabled(cpu, prop->offset, true);
    }

    /*
     * Some extensions can't be added without backward compatibility concerns.
     * Disable those; the user can still opt in to them on the command line.
     */
    cpu->cfg.ext_svade = false;

    /* set vector version */
    env->vext_ver = VEXT_VERSION_1_00_0;

    /* Zfinx is not compatible with F. Disable it */
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false);

    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false);

    if (env->misa_mxl != MXL_RV32) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
    }
}

static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL;
}

static void riscv_tcg_cpu_instance_init(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    Object *obj = OBJECT(cpu);

    misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
    multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);

    if (!misa_ext_implied_rules) {
        misa_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
    }

    if (!multi_ext_implied_rules) {
        multi_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
    }

    riscv_cpu_add_user_properties(obj);

    if (riscv_cpu_has_max_extensions(obj)) {
        riscv_init_max_cpu_extensions(obj);
    }
}

static void riscv_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
{
    /*
     * All CPUs use the same set of operations.
     */
    cc->tcg_ops = &riscv_tcg_ops;
}

static void riscv_tcg_cpu_class_init(CPUClass *cc)
{
    cc->init_accel_cpu = riscv_tcg_cpu_init_ops;
}

static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_class_init = riscv_tcg_cpu_class_init;
    acc->cpu_instance_init = riscv_tcg_cpu_instance_init;
    acc->cpu_target_realize = riscv_tcg_cpu_realize;
}

static const TypeInfo riscv_tcg_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("tcg"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = riscv_tcg_cpu_accel_class_init,
    .abstract = true,
};

static void riscv_tcg_cpu_accel_register_types(void)
{
    type_register_static(&riscv_tcg_cpu_accel_type_info);
}
type_init(riscv_tcg_cpu_accel_register_types);