/*
 * RISC-V TCG CPU class initialization
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "tcg-cpu.h"
#include "cpu.h"
#include "exec/target_page.h"
#include "internals.h"
#include "pmu.h"
#include "time_helper.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "accel/accel-cpu-target.h"
#include "accel/tcg/cpu-ops.h"
#include "tcg/tcg.h"
#ifndef CONFIG_USER_ONLY
#include "hw/boards.h"
#endif

/* Hashes that store user-set extensions */
static GHashTable *multi_ext_user_opts;
static GHashTable *misa_ext_user_opts;

static GHashTable *multi_ext_implied_rules;
static GHashTable *misa_ext_implied_rules;

static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
    return g_hash_table_contains(multi_ext_user_opts,
                                 GUINT_TO_POINTER(ext_offset));
}

static bool cpu_misa_ext_is_user_set(uint32_t misa_bit)
{
    return g_hash_table_contains(misa_ext_user_opts,
                                 GUINT_TO_POINTER(misa_bit));
}

static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value)
{
    g_hash_table_insert(multi_ext_user_opts, GUINT_TO_POINTER(ext_offset),
                        (gpointer)value);
}

static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value)
{
    g_hash_table_insert(misa_ext_user_opts, GUINT_TO_POINTER(bit),
                        (gpointer)value);
}

static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit,
                                     bool enabled)
{
    CPURISCVState *env = &cpu->env;

    if (enabled) {
        env->misa_ext |= bit;
        env->misa_ext_mask |= bit;
    } else {
        env->misa_ext &= ~bit;
        env->misa_ext_mask &= ~bit;
    }
}

static const char *cpu_priv_ver_to_str(int priv_ver)
{
    const char *priv_spec_str = priv_spec_to_str(priv_ver);

    g_assert(priv_spec_str);

    return priv_spec_str;
}

static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return riscv_env_mmu_index(cpu_env(cs), ifetch);
}

static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

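/*
 * Restore interrupted-translation state. The saved opcode data is
 * { pc, bins, excp_uw2 }: with CF_PCREL only the page offset of the PC
 * is recorded, so the full value is rebuilt from env->pc's page.
 */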
static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
    env->excp_uw2 = data[2];
}

static const TCGCPUOps riscv_tcg_ops = {
    .guest_default_memory_order = 0,

    .initialize = riscv_translate_init,
    .translate_code = riscv_translate_code,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,
    .mmu_index = riscv_cpu_mmu_index,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .cpu_exec_halt = riscv_cpu_has_work,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

static int cpu_cfg_ext_get_min_version(uint32_t ext_offset)
{
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (edata->ext_enable_offset != ext_offset) {
            continue;
        }

        return edata->min_version;
    }

    g_assert_not_reached();
}

static const char *cpu_cfg_ext_get_name(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata->name != NULL; edata++) {
        if (edata->ext_enable_offset == ext_offset) {
            return edata->name;
        }
    }

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return feat->name;
        }
    }

    g_assert_not_reached();
}

static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return true;
        }
    }

    return false;
}

static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
{
    /*
     * All other named features are already enabled
     * in riscv_tcg_cpu_instance_init().
     */
    switch (feat_offset) {
    case CPU_CFG_OFFSET(ext_zic64b):
        cpu->cfg.cbom_blocksize = 64;
        cpu->cfg.cbop_blocksize = 64;
        cpu->cfg.cboz_blocksize = 64;
        break;
    case CPU_CFG_OFFSET(ext_sha):
        if (!cpu_misa_ext_is_user_set(RVH)) {
            riscv_cpu_write_misa_bit(cpu, RVH, true);
        }
        /* fallthrough */
    case CPU_CFG_OFFSET(ext_ssstateen):
        cpu->cfg.ext_smstateen = true;
        break;
    }
}

static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env,
                                        uint32_t ext_offset)
{
    int ext_priv_ver;

    if (env->priv_ver == PRIV_VERSION_LATEST) {
        return;
    }

    ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset);

    if (env->priv_ver < ext_priv_ver) {
        /*
         * Note: the 'priv_spec' command line option, if present,
         * will take precedence over this priv_ver bump.
         */
        env->priv_ver = ext_priv_ver;
    }
}

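/*
 * Flip an extension only when the user did not set it explicitly on the
 * command line, and never enable one whose minimum priv spec is newer
 * than the hart's current priv_ver.
 */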
static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
                                    bool value)
{
    CPURISCVState *env = &cpu->env;
    bool prev_val = isa_ext_is_enabled(cpu, ext_offset);
    int min_version;

    if (prev_val == value) {
        return;
    }

    if (cpu_cfg_ext_is_user_set(ext_offset)) {
        return;
    }

    if (value && env->priv_ver != PRIV_VERSION_LATEST) {
        /* Do not enable it if priv_ver is older than min_version */
        min_version = cpu_cfg_ext_get_min_version(ext_offset);
        if (env->priv_ver < min_version) {
            return;
        }
    }

    isa_ext_update_enabled(cpu, ext_offset, value);
}

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    uint32_t vlen = cfg->vlenb << 3;

    if (vlen > RV_VLEN_MAX || vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }

    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
}

static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    const RISCVIsaExtData *edata;

    /* Force disable extensions if priv spec version does not match */
    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) &&
            (env->priv_ver < edata->min_version)) {
            /*
             * These two extensions are always enabled as they were supported
             * by QEMU before they were added as extensions in the ISA.
             */
            if (!strcmp(edata->name, "zicntr") ||
                !strcmp(edata->name, "zihpm")) {
                continue;
            }

            isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);

            /*
             * Do not show user warnings for named features that users
             * can't enable/disable in the command line. See commit
             * 68c9e54bea for more info.
             */
            if (cpu_cfg_offset_is_named_feat(edata->ext_enable_offset)) {
                continue;
            }
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        edata->name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        edata->name);
#endif
        }
    }
}

static void riscv_cpu_update_named_features(RISCVCPU *cpu)
{
    if (cpu->env.priv_ver >= PRIV_VERSION_1_11_0) {
        cpu->cfg.has_priv_1_11 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_12_0) {
        cpu->cfg.has_priv_1_12 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_13_0) {
        cpu->cfg.has_priv_1_13 = true;
    }

    cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
                          cpu->cfg.cbop_blocksize == 64 &&
                          cpu->cfg.cboz_blocksize == 64;

    cpu->cfg.ext_ssstateen = cpu->cfg.ext_smstateen;

    cpu->cfg.ext_sha = riscv_has_ext(&cpu->env, RVH) &&
                       cpu->cfg.ext_ssstateen;

    cpu->cfg.ext_ziccrse = cpu->cfg.has_priv_1_11;
}

static void riscv_cpu_validate_g(RISCVCPU *cpu)
{
    const char *warn_msg = "RVG mandates disabled extension %s";
    uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD};
    bool send_warn = cpu_misa_ext_is_user_set(RVG);

    for (int i = 0; i < ARRAY_SIZE(g_misa_bits); i++) {
        uint32_t bit = g_misa_bits[i];

        if (riscv_has_ext(&cpu->env, bit)) {
            continue;
        }

        if (!cpu_misa_ext_is_user_set(bit)) {
            riscv_cpu_write_misa_bit(cpu, bit, true);
            continue;
        }

        if (send_warn) {
            warn_report(warn_msg, riscv_get_misa_ext_name(bit));
        }
    }

    if (!cpu->cfg.ext_zicsr) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) {
            cpu->cfg.ext_zicsr = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zicsr");
        }
    }

    if (!cpu->cfg.ext_zifencei) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) {
            cpu->cfg.ext_zifencei = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zifencei");
        }
    }
}

static void riscv_cpu_validate_b(RISCVCPU *cpu)
{
    const char *warn_msg = "RVB mandates disabled extension %s";

    if (!cpu->cfg.ext_zba) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zba))) {
            cpu->cfg.ext_zba = true;
        } else {
            warn_report(warn_msg, "zba");
        }
    }

    if (!cpu->cfg.ext_zbb) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbb))) {
            cpu->cfg.ext_zbb = true;
        } else {
            warn_report(warn_msg, "zbb");
        }
    }

    if (!cpu->cfg.ext_zbs) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbs))) {
            cpu->cfg.ext_zbs = true;
        } else {
            warn_report(warn_msg, "zbs");
        }
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
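/*
 * For instance, a (hypothetical) '-cpu rv64,zcd=on' with D disabled is
 * rejected below, while an RVG/RVB dependency that the user did not
 * touch is silently enabled by the helpers above.
 */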
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    if (riscv_has_ext(env, RVG)) {
        riscv_cpu_validate_g(cpu);
    }

    if (riscv_has_ext(env, RVB)) {
        riscv_cpu_validate_b(cpu);
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zacas extension requires A extension");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        if (!riscv_has_ext(env, RVD)) {
            error_setg(errp, "Zve64d/V extensions require D extension");
            return;
        }
    }

    /* The Zve32f extension depends on the Zve32x extension */
    if (cpu->cfg.ext_zve32f) {
        if (!riscv_has_ext(env, RVF)) {
            error_setg(errp, "Zve32f/Zve64f extensions require F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extension requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

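    /*
     * Zfinx/Zdinx/Zhinx* reuse the integer registers for floating-point
     * operands, so they cannot coexist with a real F register file.
     */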
    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zcmop && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcmop extensions require Zca");
        return;
    }

    if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
         cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32x) {
        error_setg(errp,
                   "Vector crypto extensions require V or Zve* extensions");
        return;
    }

    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64x) {
        error_setg(
            errp,
            "Zvbc and Zvknhb extensions require V or Zve64x extensions");
        return;
    }

    if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) {
            error_setg(errp, "zicntr requires zicsr");
            return;
        }
        cpu->cfg.ext_zicntr = false;
    }

    if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) {
            error_setg(errp, "zihpm requires zicsr");
            return;
        }
        cpu->cfg.ext_zihpm = false;
    }

    if (cpu->cfg.ext_zicfiss) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "zicfiss extension requires zicsr extension");
            return;
        }
        if (!riscv_has_ext(env, RVA)) {
            error_setg(errp, "zicfiss extension requires A extension");
            return;
        }
        if (!riscv_has_ext(env, RVS)) {
            error_setg(errp, "zicfiss extension requires S");
            return;
        }
        if (!cpu->cfg.ext_zimop) {
            error_setg(errp, "zicfiss extension requires zimop extension");
            return;
        }
        if (cpu->cfg.ext_zca && !cpu->cfg.ext_zcmop) {
            error_setg(errp, "zicfiss with zca requires zcmop extension");
            return;
        }
    }

    if (!cpu->cfg.ext_zihpm) {
        cpu->cfg.pmu_mask = 0;
        cpu->pmu_avail_ctrs = 0;
    }

    if (cpu->cfg.ext_zicfilp && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "zicfilp extension requires zicsr extension");
        return;
    }

    if (mcc->misa_mxl_max == MXL_RV32 && cpu->cfg.ext_svukte) {
        error_setg(errp, "svukte is not supported for RV32");
        return;
    }

    if ((cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr) &&
        (!riscv_has_ext(env, RVS) || !cpu->cfg.ext_sscsrind)) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_smctr)) ||
            cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_ssctr))) {
            error_setg(errp, "Smctr and Ssctr require S-mode and Sscsrind");
            return;
        }
        cpu->cfg.ext_smctr = false;
        cpu->cfg.ext_ssctr = false;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

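/*
 * Profile support: a profile is only reported as enabled when the hart
 * satisfies its satp mode, priv spec, MISA bits and extension list.
 */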
#ifndef CONFIG_USER_ONLY
static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
                                            RISCVCPUProfile *profile,
                                            bool send_warn)
{
    int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (profile->satp_mode > satp_max) {
        if (send_warn) {
            bool is_32bit = riscv_cpu_is_32bit(cpu);
            const char *req_satp = satp_mode_str(profile->satp_mode, is_32bit);
            const char *cur_satp = satp_mode_str(satp_max, is_32bit);

            warn_report("Profile %s requires satp mode %s, "
                        "but satp mode %s was set", profile->name,
                        req_satp, cur_satp);
        }

        return false;
    }

    return true;
}
#endif

static void riscv_cpu_check_parent_profile(RISCVCPU *cpu,
                                           RISCVCPUProfile *profile,
                                           RISCVCPUProfile *parent)
{
    const char *parent_name;
    bool parent_enabled;

    if (!profile->enabled || !parent) {
        return;
    }

    parent_name = parent->name;
    parent_enabled = object_property_get_bool(OBJECT(cpu), parent_name, NULL);
    profile->enabled = parent_enabled;
}

static void riscv_cpu_validate_profile(RISCVCPU *cpu,
                                       RISCVCPUProfile *profile)
{
    CPURISCVState *env = &cpu->env;
    const char *warn_msg = "Profile %s mandates disabled extension %s";
    bool send_warn = profile->user_set && profile->enabled;
    bool profile_impl = true;
    int i;

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        profile_impl = riscv_cpu_validate_profile_satp(cpu, profile,
                                                       send_warn);
    }
#endif

    if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
        profile->priv_spec > env->priv_ver) {
        profile_impl = false;

        if (send_warn) {
            warn_report("Profile %s requires priv spec %s, "
                        "but priv ver %s was set", profile->name,
                        cpu_priv_ver_to_str(profile->priv_spec),
                        cpu_priv_ver_to_str(env->priv_ver));
        }
    }

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (!riscv_has_ext(&cpu->env, bit)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            riscv_get_misa_ext_name(bit));
            }
        }
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        int ext_offset = profile->ext_offsets[i];

        if (!isa_ext_is_enabled(cpu, ext_offset)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            cpu_cfg_ext_get_name(ext_offset));
            }
        }
    }

    profile->enabled = profile_impl;

    riscv_cpu_check_parent_profile(cpu, profile, profile->u_parent);
    riscv_cpu_check_parent_profile(cpu, profile, profile->s_parent);
}

static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        riscv_cpu_validate_profile(cpu, riscv_profiles[i]);
    }
}

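/*
 * The implied-extension rule arrays are turned into hash tables so that
 * cpu_enable_implied_rule() can chase transitive rules quickly. In system
 * mode each rule also carries a per-hart bitmap so it is applied at most
 * once per hart.
 */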
static void riscv_cpu_init_implied_exts_rules(void)
{
    RISCVCPUImpliedExtsRule *rule;
#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
#endif
    static bool initialized;
    int i;

    /* Implied rules only need to be initialized once. */
    if (initialized) {
        return;
    }

    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(misa_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
#ifndef CONFIG_USER_ONLY
        rule->enabled = bitmap_new(ms->smp.cpus);
#endif
        g_hash_table_insert(multi_ext_implied_rules,
                            GUINT_TO_POINTER(rule->ext), (gpointer)rule);
    }

    initialized = true;
}

static void cpu_enable_implied_rule(RISCVCPU *cpu,
                                    RISCVCPUImpliedExtsRule *rule)
{
    CPURISCVState *env = &cpu->env;
    RISCVCPUImpliedExtsRule *ir;
    bool enabled = false;
    int i;

#ifndef CONFIG_USER_ONLY
    enabled = test_bit(cpu->env.mhartid, rule->enabled);
#endif

    if (!enabled) {
        /* Enable the implied MISAs. */
        if (rule->implied_misa_exts) {
            for (i = 0; misa_bits[i] != 0; i++) {
                if (rule->implied_misa_exts & misa_bits[i]) {
                    /*
                     * If the user disabled the misa_bit, do not re-enable it
                     * and do not apply any implied rules related to it.
                     */
                    if (cpu_misa_ext_is_user_set(misa_bits[i]) &&
                        !(env->misa_ext & misa_bits[i])) {
                        continue;
                    }

                    riscv_cpu_set_misa_ext(env, env->misa_ext | misa_bits[i]);
                    ir = g_hash_table_lookup(misa_ext_implied_rules,
                                             GUINT_TO_POINTER(misa_bits[i]));

                    if (ir) {
                        cpu_enable_implied_rule(cpu, ir);
                    }
                }
            }
        }

        /* Enable the implied extensions. */
        for (i = 0;
             rule->implied_multi_exts[i] != RISCV_IMPLIED_EXTS_RULE_END; i++) {
            cpu_cfg_ext_auto_update(cpu, rule->implied_multi_exts[i], true);

            ir = g_hash_table_lookup(multi_ext_implied_rules,
                                     GUINT_TO_POINTER(
                                         rule->implied_multi_exts[i]));

            if (ir) {
                cpu_enable_implied_rule(cpu, ir);
            }
        }

#ifndef CONFIG_USER_ONLY
        bitmap_set(rule->enabled, cpu->env.mhartid, 1);
#endif
    }
}

/* Zc extension has special implied rules that need to be handled separately. */
static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (cpu->cfg.ext_zce) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);

        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
    }

    /* Zca, Zcd and Zcf have a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);

        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }

        if (riscv_has_ext(env, RVD)) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
        }
    }
}

static void riscv_cpu_enable_implied_rules(RISCVCPU *cpu)
{
    RISCVCPUImpliedExtsRule *rule;
    int i;

    /* Enable the implied extensions for Zc. */
    cpu_enable_zc_implied_rules(cpu);

    /* Enable the implied MISAs. */
    for (i = 0; (rule = riscv_misa_ext_implied_rules[i]); i++) {
        if (riscv_has_ext(&cpu->env, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }

    /* Enable the implied extensions. */
    for (i = 0; (rule = riscv_multi_ext_implied_rules[i]); i++) {
        if (isa_ext_is_enabled(cpu, rule->ext)) {
            cpu_enable_implied_rule(cpu, rule);
        }
    }
}

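/*
 * Finalize the TCG-specific configuration: apply implied rules, validate
 * MISA against the priv spec, recompute named features, re-check profiles
 * and run the extension consistency checks before the PMU is set up.
 */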
void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    riscv_cpu_init_implied_exts_rules();
    riscv_cpu_enable_implied_rules(cpu);

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_update_named_features(cpu);
    riscv_cpu_validate_profiles(cpu);

    if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: Smepmp requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.pmu_mask) {
        riscv_pmu_init(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        if (cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }
#endif
}

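/*
 * Build the per-CPU decoder list: a decoder is included only when its
 * guard function accepts the finalized configuration.
 */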
void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
{
    GPtrArray *dynamic_decoders;
    dynamic_decoders = g_ptr_array_sized_new(decoder_table_size);
    for (size_t i = 0; i < decoder_table_size; ++i) {
        if (decoder_table[i].guard_func &&
            decoder_table[i].guard_func(&cpu->cfg)) {
            g_ptr_array_add(dynamic_decoders,
                            (gpointer)decoder_table[i].riscv_cpu_decode_fn);
        }
    }

    cpu->decoders = dynamic_decoders;
}

bool riscv_cpu_tcg_compatible(RISCVCPU *cpu)
{
    return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL;
}

static bool riscv_cpu_is_generic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> tcg_cpu_realize() (via accel_cpu_common_realize())
 */
static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (!riscv_cpu_tcg_compatible(cpu)) {
        g_autofree char *name = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU is not compatible with TCG acceleration",
                   name);
        return false;
    }

#ifndef CONFIG_USER_ONLY
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);

    if (mcc->misa_mxl_max >= MXL_RV128 && qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_setg(errp,
                   "128-bit RISC-V currently does not work with Multi "
                   "Threaded TCG. Please use: -accel tcg,thread=single");
        return false;
    }

    CPURISCVState *env = &cpu->env;

    tcg_cflags_set(CPU(cs), CF_PCREL);

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
    }
#endif

    return true;
}

typedef struct RISCVCPUMisaExtConfig {
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;

static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    cpu_misa_ext_add_user_opt(misa_bit, value);

    prev_val = env->misa_ext & misa_bit;

    if (value == prev_val) {
        return;
    }

    if (value) {
        if (vendor_cpu) {
            g_autofree char *cpuname = riscv_cpu_get_name(cpu);
            error_setg(errp, "'%s' CPU does not allow enabling extensions",
                       cpuname);
            return;
        }

        if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) {
            /*
             * Note: the 'priv_spec' command line option, if present,
             * will take precedence over this priv_ver bump.
             */
            env->priv_ver = PRIV_VERSION_1_12_0;
        }
    }

    riscv_cpu_write_misa_bit(cpu, misa_bit, value);
}

static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

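/*
 * Each entry below becomes a user-facing boolean property, so a
 * (hypothetical) command line such as '-cpu rv64,v=on,h=off' ends up in
 * cpu_set_misa_ext_cfg() once per single-letter extension.
 */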
#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
    MISA_CFG(RVB, false),
};

/*
 * We do not support user choice tracking for MISA
 * extensions yet because, so far, we do not silently
 * change MISA bits during realize() (RVG enables MISA
 * bits but the user is warned about it).
 */
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    bool use_def_vals = riscv_cpu_is_generic(cpu_obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
        int bit = misa_cfg->misa_bit;
        const char *name = riscv_get_misa_ext_name(bit);
        const char *desc = riscv_get_misa_ext_description(bit);

        /* Check if KVM already created the property */
        if (object_property_find(cpu_obj, name)) {
            continue;
        }

        object_property_add(cpu_obj, name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, name, desc);
        if (use_def_vals) {
            riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit,
                                     misa_cfg->enabled);
        }
    }
}

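/*
 * Setting a profile property cascades: parent profiles are toggled too,
 * the priv spec is raised to what the profile mandates, and every MISA
 * bit and extension in the profile is recorded as a user choice.
 */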
static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;
    int i, ext_offset;

    if (riscv_cpu_is_vendor(obj)) {
        error_setg(errp, "Profile %s is not available for vendor CPUs",
                   profile->name);
        return;
    }

    if (cpu->env.misa_mxl != MXL_RV64) {
        error_setg(errp, "Profile %s only available for 64 bit CPUs",
                   profile->name);
        return;
    }

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    profile->user_set = true;
    profile->enabled = value;

    if (profile->u_parent != NULL) {
        object_property_set_bool(obj, profile->u_parent->name,
                                 profile->enabled, NULL);
    }

    if (profile->s_parent != NULL) {
        object_property_set_bool(obj, profile->s_parent->name,
                                 profile->enabled, NULL);
    }

    if (profile->enabled) {
        cpu->env.priv_ver = profile->priv_spec;
    }

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        object_property_set_bool(obj, "mmu", true, NULL);
        const char *satp_prop = satp_mode_str(profile->satp_mode,
                                              riscv_cpu_is_32bit(cpu));
        object_property_set_bool(obj, satp_prop, profile->enabled, NULL);
    }
#endif

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (bit == RVI && !profile->enabled) {
            /*
             * Disabling profiles will not disable the base
             * ISA RV64I.
             */
            continue;
        }

        cpu_misa_ext_add_user_opt(bit, profile->enabled);
        riscv_cpu_write_misa_bit(cpu, bit, profile->enabled);
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        ext_offset = profile->ext_offsets[i];

        if (profile->enabled) {
            if (cpu_cfg_offset_is_named_feat(ext_offset)) {
                riscv_cpu_enable_named_feat(cpu, ext_offset);
            }

            cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset);
        }

        cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled);
        isa_ext_update_enabled(cpu, ext_offset, profile->enabled);
    }
}

static void cpu_get_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    bool value = profile->enabled;

    visit_type_bool(v, name, &value, errp);
}

static void riscv_cpu_add_profiles(Object *cpu_obj)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        const RISCVCPUProfile *profile = riscv_profiles[i];

        object_property_add(cpu_obj, profile->name, "bool",
                            cpu_get_profile, cpu_set_profile,
                            NULL, (void *)profile);

        /*
         * CPUs might enable a profile right from the start.
         * Enable its mandatory extensions right away in this
         * case.
         */
        if (profile->enabled) {
            object_property_set_bool(cpu_obj, profile->name, true, NULL);
        }
    }
}

static bool cpu_ext_is_deprecated(const char *ext_name)
{
    return isupper(ext_name[0]);
}

/*
 * String will be allocated on the heap. Caller is responsible
 * for freeing it.
 */
static char *cpu_ext_to_lower(const char *ext_name)
{
    char *ret = g_malloc0(strlen(ext_name) + 1);

    strcpy(ret, ext_name);
    ret[0] = tolower(ret[0]);

    return ret;
}

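/*
 * Multi-letter extensions are plain boolean properties as well: a
 * (hypothetical) '-cpu rv64,zba=on,zicsr=off' goes through
 * cpu_set_multi_ext_cfg() below once per flag.
 */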
static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (cpu_ext_is_deprecated(multi_ext_cfg->name)) {
        g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name);

        warn_report("CPU property '%s' is deprecated. Please use '%s' instead",
                    multi_ext_cfg->name, lower);
    }

    cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value);

    prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset);

    if (value == prev_val) {
        return;
    }

    if (value && vendor_cpu) {
        g_autofree char *cpuname = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU does not allow enabling extensions",
                   cpuname);
        return;
    }

    if (value) {
        cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset);
    }

    isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value);
}

static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_add_multi_ext_prop(Object *cpu_obj,
                                   const RISCVCPUMultiExtConfig *multi_cfg)
{
    bool generic_cpu = riscv_cpu_is_generic(cpu_obj);
    bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name);

    object_property_add(cpu_obj, multi_cfg->name, "bool",
                        cpu_get_multi_ext_cfg,
                        cpu_set_multi_ext_cfg,
                        NULL, (void *)multi_cfg);

    if (!generic_cpu || deprecated_ext) {
        return;
    }

    /*
     * Set the default value directly instead of going through
     * object_property_set_bool(), so the set() callback does not
     * record it as a user option.
     */
    isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset,
                           multi_cfg->enabled);
}

static void riscv_cpu_add_multiext_prop_array(Object *obj,
                                              const RISCVCPUMultiExtConfig *array)
{
    const RISCVCPUMultiExtConfig *prop;

    g_assert(array);

    for (prop = array; prop && prop->name; prop++) {
        cpu_add_multi_ext_prop(obj, prop);
    }
}

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);
#endif

    riscv_cpu_add_misa_properties(obj);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts);

    riscv_cpu_add_profiles(obj);
}

/*
 * The 'max' type CPU will have all possible ratified
 * non-vendor extensions enabled.
 */
static void riscv_init_max_cpu_extensions(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    const RISCVCPUMultiExtConfig *prop;

    /* Enable RVB, RVG and RVV, which are disabled by default */
    riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVV);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
        isa_ext_update_enabled(cpu, prop->offset, true);
    }

    /*
     * Some extensions can't be added without backward compatibility
     * concerns. Disable those; the user can still opt in to them on
     * the command line.
     */
    cpu->cfg.ext_svade = false;

    /* set vector version */
    env->vext_ver = VEXT_VERSION_1_00_0;

    /* Zfinx is not compatible with F. Disable it */
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false);

    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false);

    if (env->misa_mxl != MXL_RV32) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
    }

    /*
     * TODO: ext_smrnmi requires OpenSBI changes that our current
     * image does not have. Disable it for now.
     */
    if (cpu->cfg.ext_smrnmi) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smrnmi), false);
    }

    /*
     * TODO: ext_smdbltrp requires the firmware to clear MSTATUS.MDT on
     * startup to avoid generating a double trap. OpenSBI does not
     * currently support it, so disable it for now.
     */
    if (cpu->cfg.ext_smdbltrp) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smdbltrp), false);
    }
}

static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL;
}

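/*
 * Per-instance init: allocate fresh user-option hashes, register every
 * user-facing property and, for the 'max' CPU, enable all ratified
 * non-vendor extensions up front.
 */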
static void riscv_tcg_cpu_instance_init(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    Object *obj = OBJECT(cpu);

    misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
    multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);

    if (!misa_ext_implied_rules) {
        misa_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
    }

    if (!multi_ext_implied_rules) {
        multi_ext_implied_rules = g_hash_table_new(NULL, g_direct_equal);
    }

    riscv_cpu_add_user_properties(obj);

    if (riscv_cpu_has_max_extensions(obj)) {
        riscv_init_max_cpu_extensions(obj);
    }
}

static void riscv_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
{
    /*
     * All cpus use the same set of operations.
     */
    cc->tcg_ops = &riscv_tcg_ops;
}

static void riscv_tcg_cpu_class_init(CPUClass *cc)
{
    cc->init_accel_cpu = riscv_tcg_cpu_init_ops;
}

static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_class_init = riscv_tcg_cpu_class_init;
    acc->cpu_instance_init = riscv_tcg_cpu_instance_init;
    acc->cpu_target_realize = riscv_tcg_cpu_realize;
}

static const TypeInfo riscv_tcg_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("tcg"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = riscv_tcg_cpu_accel_class_init,
    .abstract = true,
};

static void riscv_tcg_cpu_accel_register_types(void)
{
    type_register_static(&riscv_tcg_cpu_accel_type_info);
}
type_init(riscv_tcg_cpu_accel_register_types);