1 /* 2 * riscv TCG cpu class initialization 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2 or later, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * 16 * You should have received a copy of the GNU General Public License along with 17 * this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "exec/exec-all.h" 22 #include "tcg-cpu.h" 23 #include "cpu.h" 24 #include "pmu.h" 25 #include "time_helper.h" 26 #include "qapi/error.h" 27 #include "qapi/visitor.h" 28 #include "qemu/accel.h" 29 #include "qemu/error-report.h" 30 #include "qemu/log.h" 31 #include "hw/core/accel-cpu.h" 32 #include "hw/core/tcg-cpu-ops.h" 33 #include "tcg/tcg.h" 34 35 /* Hash that stores user set extensions */ 36 static GHashTable *multi_ext_user_opts; 37 static GHashTable *misa_ext_user_opts; 38 39 static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset) 40 { 41 return g_hash_table_contains(multi_ext_user_opts, 42 GUINT_TO_POINTER(ext_offset)); 43 } 44 45 static bool cpu_misa_ext_is_user_set(uint32_t misa_bit) 46 { 47 return g_hash_table_contains(misa_ext_user_opts, 48 GUINT_TO_POINTER(misa_bit)); 49 } 50 51 static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value) 52 { 53 g_hash_table_insert(multi_ext_user_opts, GUINT_TO_POINTER(ext_offset), 54 (gpointer)value); 55 } 56 57 static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value) 58 { 59 g_hash_table_insert(misa_ext_user_opts, GUINT_TO_POINTER(bit), 60 (gpointer)value); 61 } 62 63 static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit, 64 bool enabled) 65 { 66 CPURISCVState *env = &cpu->env; 67 68 if (enabled) { 69 env->misa_ext |= bit; 70 env->misa_ext_mask |= bit; 71 } else { 72 env->misa_ext &= ~bit; 73 env->misa_ext_mask &= ~bit; 74 } 75 } 76 77 static const char *cpu_priv_ver_to_str(int priv_ver) 78 { 79 switch (priv_ver) { 80 case PRIV_VERSION_1_10_0: 81 return "v1.10.0"; 82 case PRIV_VERSION_1_11_0: 83 return "v1.11.0"; 84 case PRIV_VERSION_1_12_0: 85 return "v1.12.0"; 86 } 87 88 g_assert_not_reached(); 89 } 90 91 static void riscv_cpu_synchronize_from_tb(CPUState *cs, 92 const TranslationBlock *tb) 93 { 94 if (!(tb_cflags(tb) & CF_PCREL)) { 95 RISCVCPU *cpu = RISCV_CPU(cs); 96 CPURISCVState *env = &cpu->env; 97 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL); 98 99 tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); 100 101 if (xl == MXL_RV32) { 102 env->pc = (int32_t) tb->pc; 103 } else { 104 env->pc = tb->pc; 105 } 106 } 107 } 108 109 static void riscv_restore_state_to_opc(CPUState *cs, 110 const TranslationBlock *tb, 111 const uint64_t *data) 112 { 113 RISCVCPU *cpu = RISCV_CPU(cs); 114 CPURISCVState *env = &cpu->env; 115 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL); 116 target_ulong pc; 117 118 if (tb_cflags(tb) & CF_PCREL) { 119 pc = (env->pc & TARGET_PAGE_MASK) | data[0]; 120 } else { 121 pc = data[0]; 122 } 123 124 if (xl == MXL_RV32) { 125 env->pc = (int32_t)pc; 126 } else { 127 env->pc = pc; 128 } 129 env->bins = data[1]; 130 } 131 132 static const TCGCPUOps riscv_tcg_ops = { 133 .initialize = riscv_translate_init, 134 .synchronize_from_tb = riscv_cpu_synchronize_from_tb, 135 .restore_state_to_opc = riscv_restore_state_to_opc, 136 137 #ifndef CONFIG_USER_ONLY 138 .tlb_fill = riscv_cpu_tlb_fill, 139 .cpu_exec_interrupt = riscv_cpu_exec_interrupt, 140 .do_interrupt = riscv_cpu_do_interrupt, 141 .do_transaction_failed = riscv_cpu_do_transaction_failed, 142 .do_unaligned_access = riscv_cpu_do_unaligned_access, 143 .debug_excp_handler = riscv_cpu_debug_excp_handler, 144 .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint, 145 .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint, 146 #endif /* !CONFIG_USER_ONLY */ 147 }; 148 149 static int cpu_cfg_ext_get_min_version(uint32_t ext_offset) 150 { 151 const RISCVIsaExtData *edata; 152 153 for (edata = isa_edata_arr; edata && edata->name; edata++) { 154 if (edata->ext_enable_offset != ext_offset) { 155 continue; 156 } 157 158 return edata->min_version; 159 } 160 161 g_assert_not_reached(); 162 } 163 164 static const char *cpu_cfg_ext_get_name(uint32_t ext_offset) 165 { 166 const RISCVCPUMultiExtConfig *feat; 167 const RISCVIsaExtData *edata; 168 169 for (edata = isa_edata_arr; edata->name != NULL; edata++) { 170 if (edata->ext_enable_offset == ext_offset) { 171 return edata->name; 172 } 173 } 174 175 for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) { 176 if (feat->offset == ext_offset) { 177 return feat->name; 178 } 179 } 180 181 g_assert_not_reached(); 182 } 183 184 static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset) 185 { 186 const RISCVCPUMultiExtConfig *feat; 187 188 for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) { 189 if (feat->offset == ext_offset) { 190 return true; 191 } 192 } 193 194 return false; 195 } 196 197 static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset) 198 { 199 switch (feat_offset) { 200 case CPU_CFG_OFFSET(zic64b): 201 cpu->cfg.cbom_blocksize = 64; 202 cpu->cfg.cbop_blocksize = 64; 203 cpu->cfg.cboz_blocksize = 64; 204 break; 205 case CPU_CFG_OFFSET(svade): 206 cpu->cfg.ext_svadu = false; 207 break; 208 default: 209 g_assert_not_reached(); 210 } 211 } 212 213 static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env, 214 uint32_t ext_offset) 215 { 216 int ext_priv_ver; 217 218 if (env->priv_ver == PRIV_VERSION_LATEST) { 219 return; 220 } 221 222 if (cpu_cfg_offset_is_named_feat(ext_offset)) { 223 return; 224 } 225 226 ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset); 227 228 if (env->priv_ver < ext_priv_ver) { 229 /* 230 * Note: the 'priv_spec' command line option, if present, 231 * will take precedence over this priv_ver bump. 232 */ 233 env->priv_ver = ext_priv_ver; 234 } 235 } 236 237 static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset, 238 bool value) 239 { 240 CPURISCVState *env = &cpu->env; 241 bool prev_val = isa_ext_is_enabled(cpu, ext_offset); 242 int min_version; 243 244 if (prev_val == value) { 245 return; 246 } 247 248 if (cpu_cfg_ext_is_user_set(ext_offset)) { 249 return; 250 } 251 252 if (value && env->priv_ver != PRIV_VERSION_LATEST) { 253 /* Do not enable it if priv_ver is older than min_version */ 254 min_version = cpu_cfg_ext_get_min_version(ext_offset); 255 if (env->priv_ver < min_version) { 256 return; 257 } 258 } 259 260 isa_ext_update_enabled(cpu, ext_offset, value); 261 } 262 263 static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp) 264 { 265 if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) { 266 error_setg(errp, "H extension requires priv spec 1.12.0"); 267 return; 268 } 269 } 270 271 static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp) 272 { 273 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 274 CPUClass *cc = CPU_CLASS(mcc); 275 CPURISCVState *env = &cpu->env; 276 277 /* Validate that MISA_MXL is set properly. */ 278 switch (env->misa_mxl_max) { 279 #ifdef TARGET_RISCV64 280 case MXL_RV64: 281 case MXL_RV128: 282 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; 283 break; 284 #endif 285 case MXL_RV32: 286 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; 287 break; 288 default: 289 g_assert_not_reached(); 290 } 291 292 if (env->misa_mxl_max != env->misa_mxl) { 293 error_setg(errp, "misa_mxl_max must be equal to misa_mxl"); 294 return; 295 } 296 } 297 298 static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp) 299 { 300 CPURISCVState *env = &cpu->env; 301 int priv_version = -1; 302 303 if (cpu->cfg.priv_spec) { 304 if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) { 305 priv_version = PRIV_VERSION_1_12_0; 306 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) { 307 priv_version = PRIV_VERSION_1_11_0; 308 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) { 309 priv_version = PRIV_VERSION_1_10_0; 310 } else { 311 error_setg(errp, 312 "Unsupported privilege spec version '%s'", 313 cpu->cfg.priv_spec); 314 return; 315 } 316 317 env->priv_ver = priv_version; 318 } 319 } 320 321 static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg, 322 Error **errp) 323 { 324 if (!is_power_of_2(cfg->vlen)) { 325 error_setg(errp, "Vector extension VLEN must be power of 2"); 326 return; 327 } 328 329 if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) { 330 error_setg(errp, 331 "Vector extension implementation only supports VLEN " 332 "in the range [128, %d]", RV_VLEN_MAX); 333 return; 334 } 335 336 if (!is_power_of_2(cfg->elen)) { 337 error_setg(errp, "Vector extension ELEN must be power of 2"); 338 return; 339 } 340 341 if (cfg->elen > 64 || cfg->elen < 8) { 342 error_setg(errp, 343 "Vector extension implementation only supports ELEN " 344 "in the range [8, 64]"); 345 return; 346 } 347 348 if (cfg->vext_spec) { 349 if (!g_strcmp0(cfg->vext_spec, "v1.0")) { 350 env->vext_ver = VEXT_VERSION_1_00_0; 351 } else { 352 error_setg(errp, "Unsupported vector spec version '%s'", 353 cfg->vext_spec); 354 return; 355 } 356 } else if (env->vext_ver == 0) { 357 qemu_log("vector version is not specified, " 358 "use the default value v1.0\n"); 359 360 env->vext_ver = VEXT_VERSION_1_00_0; 361 } 362 } 363 364 static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu) 365 { 366 CPURISCVState *env = &cpu->env; 367 const RISCVIsaExtData *edata; 368 369 /* Force disable extensions if priv spec version does not match */ 370 for (edata = isa_edata_arr; edata && edata->name; edata++) { 371 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) && 372 (env->priv_ver < edata->min_version)) { 373 /* 374 * These two extensions are always enabled as they were supported 375 * by QEMU before they were added as extensions in the ISA. 376 */ 377 if (!strcmp(edata->name, "zicntr") || 378 !strcmp(edata->name, "zihpm")) { 379 continue; 380 } 381 382 isa_ext_update_enabled(cpu, edata->ext_enable_offset, false); 383 #ifndef CONFIG_USER_ONLY 384 warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx 385 " because privilege spec version does not match", 386 edata->name, env->mhartid); 387 #else 388 warn_report("disabling %s extension because " 389 "privilege spec version does not match", 390 edata->name); 391 #endif 392 } 393 } 394 } 395 396 static void riscv_cpu_update_named_features(RISCVCPU *cpu) 397 { 398 cpu->cfg.zic64b = cpu->cfg.cbom_blocksize == 64 && 399 cpu->cfg.cbop_blocksize == 64 && 400 cpu->cfg.cboz_blocksize == 64; 401 402 cpu->cfg.svade = !cpu->cfg.ext_svadu; 403 } 404 405 static void riscv_cpu_validate_g(RISCVCPU *cpu) 406 { 407 const char *warn_msg = "RVG mandates disabled extension %s"; 408 uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD}; 409 bool send_warn = cpu_misa_ext_is_user_set(RVG); 410 411 for (int i = 0; i < ARRAY_SIZE(g_misa_bits); i++) { 412 uint32_t bit = g_misa_bits[i]; 413 414 if (riscv_has_ext(&cpu->env, bit)) { 415 continue; 416 } 417 418 if (!cpu_misa_ext_is_user_set(bit)) { 419 riscv_cpu_write_misa_bit(cpu, bit, true); 420 continue; 421 } 422 423 if (send_warn) { 424 warn_report(warn_msg, riscv_get_misa_ext_name(bit)); 425 } 426 } 427 428 if (!cpu->cfg.ext_zicsr) { 429 if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) { 430 cpu->cfg.ext_zicsr = true; 431 } else if (send_warn) { 432 warn_report(warn_msg, "zicsr"); 433 } 434 } 435 436 if (!cpu->cfg.ext_zifencei) { 437 if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) { 438 cpu->cfg.ext_zifencei = true; 439 } else if (send_warn) { 440 warn_report(warn_msg, "zifencei"); 441 } 442 } 443 } 444 445 /* 446 * Check consistency between chosen extensions while setting 447 * cpu->cfg accordingly. 448 */ 449 void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) 450 { 451 CPURISCVState *env = &cpu->env; 452 Error *local_err = NULL; 453 454 if (riscv_has_ext(env, RVG)) { 455 riscv_cpu_validate_g(cpu); 456 } 457 458 if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) { 459 error_setg(errp, 460 "I and E extensions are incompatible"); 461 return; 462 } 463 464 if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) { 465 error_setg(errp, 466 "Either I or E extension must be set"); 467 return; 468 } 469 470 if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) { 471 error_setg(errp, 472 "Setting S extension without U extension is illegal"); 473 return; 474 } 475 476 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) { 477 error_setg(errp, 478 "H depends on an I base integer ISA with 32 x registers"); 479 return; 480 } 481 482 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) { 483 error_setg(errp, "H extension implicitly requires S-mode"); 484 return; 485 } 486 487 if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) { 488 error_setg(errp, "F extension requires Zicsr"); 489 return; 490 } 491 492 if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) { 493 error_setg(errp, "Zacas extension requires A extension"); 494 return; 495 } 496 497 if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) { 498 error_setg(errp, "Zawrs extension requires A extension"); 499 return; 500 } 501 502 if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) { 503 error_setg(errp, "Zfa extension requires F extension"); 504 return; 505 } 506 507 if (cpu->cfg.ext_zfh) { 508 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zfhmin), true); 509 } 510 511 if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) { 512 error_setg(errp, "Zfh/Zfhmin extensions require F extension"); 513 return; 514 } 515 516 if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) { 517 error_setg(errp, "Zfbfmin extension depends on F extension"); 518 return; 519 } 520 521 if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) { 522 error_setg(errp, "D extension requires F extension"); 523 return; 524 } 525 526 if (riscv_has_ext(env, RVV)) { 527 riscv_cpu_validate_v(env, &cpu->cfg, &local_err); 528 if (local_err != NULL) { 529 error_propagate(errp, local_err); 530 return; 531 } 532 533 /* The V vector extension depends on the Zve64d extension */ 534 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64d), true); 535 } 536 537 /* The Zve64d extension depends on the Zve64f extension */ 538 if (cpu->cfg.ext_zve64d) { 539 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64f), true); 540 } 541 542 /* The Zve64f extension depends on the Zve32f extension */ 543 if (cpu->cfg.ext_zve64f) { 544 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve32f), true); 545 } 546 547 if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) { 548 error_setg(errp, "Zve64d/V extensions require D extension"); 549 return; 550 } 551 552 if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) { 553 error_setg(errp, "Zve32f/Zve64f extensions require F extension"); 554 return; 555 } 556 557 if (cpu->cfg.ext_zvfh) { 558 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvfhmin), true); 559 } 560 561 if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) { 562 error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension"); 563 return; 564 } 565 566 if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) { 567 error_setg(errp, "Zvfh extensions requires Zfhmin extension"); 568 return; 569 } 570 571 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) { 572 error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension"); 573 return; 574 } 575 576 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) { 577 error_setg(errp, "Zvfbfmin extension depends on Zve32f extension"); 578 return; 579 } 580 581 if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) { 582 error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension"); 583 return; 584 } 585 586 /* Set the ISA extensions, checks should have happened above */ 587 if (cpu->cfg.ext_zhinx) { 588 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true); 589 } 590 591 if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) { 592 error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx"); 593 return; 594 } 595 596 if (cpu->cfg.ext_zfinx) { 597 if (!cpu->cfg.ext_zicsr) { 598 error_setg(errp, "Zfinx extension requires Zicsr"); 599 return; 600 } 601 if (riscv_has_ext(env, RVF)) { 602 error_setg(errp, 603 "Zfinx cannot be supported together with F extension"); 604 return; 605 } 606 } 607 608 if (cpu->cfg.ext_zce) { 609 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true); 610 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true); 611 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true); 612 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true); 613 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 614 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true); 615 } 616 } 617 618 /* zca, zcd and zcf has a PRIV 1.12.0 restriction */ 619 if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) { 620 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true); 621 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 622 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true); 623 } 624 if (riscv_has_ext(env, RVD)) { 625 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true); 626 } 627 } 628 629 if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { 630 error_setg(errp, "Zcf extension is only relevant to RV32"); 631 return; 632 } 633 634 if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) { 635 error_setg(errp, "Zcf extension requires F extension"); 636 return; 637 } 638 639 if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) { 640 error_setg(errp, "Zcd extension requires D extension"); 641 return; 642 } 643 644 if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb || 645 cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) { 646 error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca " 647 "extension"); 648 return; 649 } 650 651 if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) { 652 error_setg(errp, "Zcmp/Zcmt extensions are incompatible with " 653 "Zcd extension"); 654 return; 655 } 656 657 if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) { 658 error_setg(errp, "Zcmt extension requires Zicsr extension"); 659 return; 660 } 661 662 /* 663 * Shorthand vector crypto extensions 664 */ 665 if (cpu->cfg.ext_zvknc) { 666 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true); 667 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true); 668 } 669 670 if (cpu->cfg.ext_zvkng) { 671 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true); 672 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true); 673 } 674 675 if (cpu->cfg.ext_zvkn) { 676 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkned), true); 677 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvknhb), true); 678 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true); 679 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), true); 680 } 681 682 if (cpu->cfg.ext_zvksc) { 683 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true); 684 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true); 685 } 686 687 if (cpu->cfg.ext_zvksg) { 688 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true); 689 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true); 690 } 691 692 if (cpu->cfg.ext_zvks) { 693 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksed), true); 694 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksh), true); 695 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true); 696 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), true); 697 } 698 699 if (cpu->cfg.ext_zvkt) { 700 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbb), true); 701 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true); 702 } 703 704 /* 705 * In principle Zve*x would also suffice here, were they supported 706 * in qemu 707 */ 708 if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg || 709 cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed || 710 cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) { 711 error_setg(errp, 712 "Vector crypto extensions require V or Zve* extensions"); 713 return; 714 } 715 716 if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64f) { 717 error_setg( 718 errp, 719 "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions"); 720 return; 721 } 722 723 if (cpu->cfg.ext_zk) { 724 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkn), true); 725 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkr), true); 726 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkt), true); 727 } 728 729 if (cpu->cfg.ext_zkn) { 730 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true); 731 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true); 732 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true); 733 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkne), true); 734 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknd), true); 735 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknh), true); 736 } 737 738 if (cpu->cfg.ext_zks) { 739 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true); 740 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true); 741 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true); 742 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksed), true); 743 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksh), true); 744 } 745 746 if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) { 747 if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) { 748 error_setg(errp, "zicntr requires zicsr"); 749 return; 750 } 751 cpu->cfg.ext_zicntr = false; 752 } 753 754 if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) { 755 if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) { 756 error_setg(errp, "zihpm requires zicsr"); 757 return; 758 } 759 cpu->cfg.ext_zihpm = false; 760 } 761 762 if (!cpu->cfg.ext_zihpm) { 763 cpu->cfg.pmu_mask = 0; 764 cpu->pmu_avail_ctrs = 0; 765 } 766 767 /* 768 * Disable isa extensions based on priv spec after we 769 * validated and set everything we need. 770 */ 771 riscv_cpu_disable_priv_spec_isa_exts(cpu); 772 } 773 774 #ifndef CONFIG_USER_ONLY 775 static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu, 776 RISCVCPUProfile *profile, 777 bool send_warn) 778 { 779 int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported); 780 781 if (profile->satp_mode > satp_max) { 782 if (send_warn) { 783 bool is_32bit = riscv_cpu_is_32bit(cpu); 784 const char *req_satp = satp_mode_str(profile->satp_mode, is_32bit); 785 const char *cur_satp = satp_mode_str(satp_max, is_32bit); 786 787 warn_report("Profile %s requires satp mode %s, " 788 "but satp mode %s was set", profile->name, 789 req_satp, cur_satp); 790 } 791 792 return false; 793 } 794 795 return true; 796 } 797 #endif 798 799 static void riscv_cpu_validate_profile(RISCVCPU *cpu, 800 RISCVCPUProfile *profile) 801 { 802 CPURISCVState *env = &cpu->env; 803 const char *warn_msg = "Profile %s mandates disabled extension %s"; 804 bool send_warn = profile->user_set && profile->enabled; 805 bool parent_enabled, profile_impl = true; 806 int i; 807 808 #ifndef CONFIG_USER_ONLY 809 if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) { 810 profile_impl = riscv_cpu_validate_profile_satp(cpu, profile, 811 send_warn); 812 } 813 #endif 814 815 if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED && 816 profile->priv_spec != env->priv_ver) { 817 profile_impl = false; 818 819 if (send_warn) { 820 warn_report("Profile %s requires priv spec %s, " 821 "but priv ver %s was set", profile->name, 822 cpu_priv_ver_to_str(profile->priv_spec), 823 cpu_priv_ver_to_str(env->priv_ver)); 824 } 825 } 826 827 for (i = 0; misa_bits[i] != 0; i++) { 828 uint32_t bit = misa_bits[i]; 829 830 if (!(profile->misa_ext & bit)) { 831 continue; 832 } 833 834 if (!riscv_has_ext(&cpu->env, bit)) { 835 profile_impl = false; 836 837 if (send_warn) { 838 warn_report(warn_msg, profile->name, 839 riscv_get_misa_ext_name(bit)); 840 } 841 } 842 } 843 844 for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) { 845 int ext_offset = profile->ext_offsets[i]; 846 847 if (!isa_ext_is_enabled(cpu, ext_offset)) { 848 profile_impl = false; 849 850 if (send_warn) { 851 warn_report(warn_msg, profile->name, 852 cpu_cfg_ext_get_name(ext_offset)); 853 } 854 } 855 } 856 857 profile->enabled = profile_impl; 858 859 if (profile->parent != NULL) { 860 parent_enabled = object_property_get_bool(OBJECT(cpu), 861 profile->parent->name, 862 NULL); 863 profile->enabled = profile->enabled && parent_enabled; 864 } 865 } 866 867 static void riscv_cpu_validate_profiles(RISCVCPU *cpu) 868 { 869 for (int i = 0; riscv_profiles[i] != NULL; i++) { 870 riscv_cpu_validate_profile(cpu, riscv_profiles[i]); 871 } 872 } 873 874 void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 875 { 876 CPURISCVState *env = &cpu->env; 877 Error *local_err = NULL; 878 879 riscv_cpu_validate_priv_spec(cpu, &local_err); 880 if (local_err != NULL) { 881 error_propagate(errp, local_err); 882 return; 883 } 884 885 riscv_cpu_validate_misa_priv(env, &local_err); 886 if (local_err != NULL) { 887 error_propagate(errp, local_err); 888 return; 889 } 890 891 riscv_cpu_update_named_features(cpu); 892 riscv_cpu_validate_profiles(cpu); 893 894 if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) { 895 /* 896 * Enhanced PMP should only be available 897 * on harts with PMP support 898 */ 899 error_setg(errp, "Invalid configuration: Smepmp requires PMP support"); 900 return; 901 } 902 903 riscv_cpu_validate_set_extensions(cpu, &local_err); 904 if (local_err != NULL) { 905 error_propagate(errp, local_err); 906 return; 907 } 908 } 909 910 bool riscv_cpu_tcg_compatible(RISCVCPU *cpu) 911 { 912 return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL; 913 } 914 915 static bool riscv_cpu_is_generic(Object *cpu_obj) 916 { 917 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 918 } 919 920 static bool riscv_cpu_is_vendor(Object *cpu_obj) 921 { 922 return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL; 923 } 924 925 /* 926 * We'll get here via the following path: 927 * 928 * riscv_cpu_realize() 929 * -> cpu_exec_realizefn() 930 * -> tcg_cpu_realize() (via accel_cpu_common_realize()) 931 */ 932 static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp) 933 { 934 RISCVCPU *cpu = RISCV_CPU(cs); 935 Error *local_err = NULL; 936 937 if (!riscv_cpu_tcg_compatible(cpu)) { 938 g_autofree char *name = riscv_cpu_get_name(cpu); 939 error_setg(errp, "'%s' CPU is not compatible with TCG acceleration", 940 name); 941 return false; 942 } 943 944 riscv_cpu_validate_misa_mxl(cpu, &local_err); 945 if (local_err != NULL) { 946 error_propagate(errp, local_err); 947 return false; 948 } 949 950 #ifndef CONFIG_USER_ONLY 951 CPURISCVState *env = &cpu->env; 952 953 CPU(cs)->tcg_cflags |= CF_PCREL; 954 955 if (cpu->cfg.ext_sstc) { 956 riscv_timer_init(cpu); 957 } 958 959 if (cpu->cfg.pmu_mask) { 960 riscv_pmu_init(cpu, &local_err); 961 if (local_err != NULL) { 962 error_propagate(errp, local_err); 963 return false; 964 } 965 966 if (cpu->cfg.ext_sscofpmf) { 967 cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, 968 riscv_pmu_timer_cb, cpu); 969 } 970 } 971 972 /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */ 973 if (riscv_has_ext(env, RVH)) { 974 env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP; 975 } 976 #endif 977 978 return true; 979 } 980 981 typedef struct RISCVCPUMisaExtConfig { 982 target_ulong misa_bit; 983 bool enabled; 984 } RISCVCPUMisaExtConfig; 985 986 static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 987 void *opaque, Error **errp) 988 { 989 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 990 target_ulong misa_bit = misa_ext_cfg->misa_bit; 991 RISCVCPU *cpu = RISCV_CPU(obj); 992 CPURISCVState *env = &cpu->env; 993 bool vendor_cpu = riscv_cpu_is_vendor(obj); 994 bool prev_val, value; 995 996 if (!visit_type_bool(v, name, &value, errp)) { 997 return; 998 } 999 1000 cpu_misa_ext_add_user_opt(misa_bit, value); 1001 1002 prev_val = env->misa_ext & misa_bit; 1003 1004 if (value == prev_val) { 1005 return; 1006 } 1007 1008 if (value) { 1009 if (vendor_cpu) { 1010 g_autofree char *cpuname = riscv_cpu_get_name(cpu); 1011 error_setg(errp, "'%s' CPU does not allow enabling extensions", 1012 cpuname); 1013 return; 1014 } 1015 1016 if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) { 1017 /* 1018 * Note: the 'priv_spec' command line option, if present, 1019 * will take precedence over this priv_ver bump. 1020 */ 1021 env->priv_ver = PRIV_VERSION_1_12_0; 1022 } 1023 } 1024 1025 riscv_cpu_write_misa_bit(cpu, misa_bit, value); 1026 } 1027 1028 static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 1029 void *opaque, Error **errp) 1030 { 1031 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 1032 target_ulong misa_bit = misa_ext_cfg->misa_bit; 1033 RISCVCPU *cpu = RISCV_CPU(obj); 1034 CPURISCVState *env = &cpu->env; 1035 bool value; 1036 1037 value = env->misa_ext & misa_bit; 1038 1039 visit_type_bool(v, name, &value, errp); 1040 } 1041 1042 #define MISA_CFG(_bit, _enabled) \ 1043 {.misa_bit = _bit, .enabled = _enabled} 1044 1045 static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = { 1046 MISA_CFG(RVA, true), 1047 MISA_CFG(RVC, true), 1048 MISA_CFG(RVD, true), 1049 MISA_CFG(RVF, true), 1050 MISA_CFG(RVI, true), 1051 MISA_CFG(RVE, false), 1052 MISA_CFG(RVM, true), 1053 MISA_CFG(RVS, true), 1054 MISA_CFG(RVU, true), 1055 MISA_CFG(RVH, true), 1056 MISA_CFG(RVJ, false), 1057 MISA_CFG(RVV, false), 1058 MISA_CFG(RVG, false), 1059 MISA_CFG(RVB, false), 1060 }; 1061 1062 /* 1063 * We do not support user choice tracking for MISA 1064 * extensions yet because, so far, we do not silently 1065 * change MISA bits during realize() (RVG enables MISA 1066 * bits but the user is warned about it). 1067 */ 1068 static void riscv_cpu_add_misa_properties(Object *cpu_obj) 1069 { 1070 bool use_def_vals = riscv_cpu_is_generic(cpu_obj); 1071 int i; 1072 1073 for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) { 1074 const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i]; 1075 int bit = misa_cfg->misa_bit; 1076 const char *name = riscv_get_misa_ext_name(bit); 1077 const char *desc = riscv_get_misa_ext_description(bit); 1078 1079 /* Check if KVM already created the property */ 1080 if (object_property_find(cpu_obj, name)) { 1081 continue; 1082 } 1083 1084 object_property_add(cpu_obj, name, "bool", 1085 cpu_get_misa_ext_cfg, 1086 cpu_set_misa_ext_cfg, 1087 NULL, (void *)misa_cfg); 1088 object_property_set_description(cpu_obj, name, desc); 1089 if (use_def_vals) { 1090 riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit, 1091 misa_cfg->enabled); 1092 } 1093 } 1094 } 1095 1096 static void cpu_set_profile(Object *obj, Visitor *v, const char *name, 1097 void *opaque, Error **errp) 1098 { 1099 RISCVCPUProfile *profile = opaque; 1100 RISCVCPU *cpu = RISCV_CPU(obj); 1101 bool value; 1102 int i, ext_offset; 1103 1104 if (riscv_cpu_is_vendor(obj)) { 1105 error_setg(errp, "Profile %s is not available for vendor CPUs", 1106 profile->name); 1107 return; 1108 } 1109 1110 if (cpu->env.misa_mxl != MXL_RV64) { 1111 error_setg(errp, "Profile %s only available for 64 bit CPUs", 1112 profile->name); 1113 return; 1114 } 1115 1116 if (!visit_type_bool(v, name, &value, errp)) { 1117 return; 1118 } 1119 1120 profile->user_set = true; 1121 profile->enabled = value; 1122 1123 if (profile->parent != NULL) { 1124 object_property_set_bool(obj, profile->parent->name, 1125 profile->enabled, NULL); 1126 } 1127 1128 if (profile->enabled) { 1129 cpu->env.priv_ver = profile->priv_spec; 1130 } 1131 1132 #ifndef CONFIG_USER_ONLY 1133 if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) { 1134 const char *satp_prop = satp_mode_str(profile->satp_mode, 1135 riscv_cpu_is_32bit(cpu)); 1136 object_property_set_bool(obj, satp_prop, profile->enabled, NULL); 1137 } 1138 #endif 1139 1140 for (i = 0; misa_bits[i] != 0; i++) { 1141 uint32_t bit = misa_bits[i]; 1142 1143 if (!(profile->misa_ext & bit)) { 1144 continue; 1145 } 1146 1147 if (bit == RVI && !profile->enabled) { 1148 /* 1149 * Disabling profiles will not disable the base 1150 * ISA RV64I. 1151 */ 1152 continue; 1153 } 1154 1155 cpu_misa_ext_add_user_opt(bit, profile->enabled); 1156 riscv_cpu_write_misa_bit(cpu, bit, profile->enabled); 1157 } 1158 1159 for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) { 1160 ext_offset = profile->ext_offsets[i]; 1161 1162 if (profile->enabled) { 1163 if (cpu_cfg_offset_is_named_feat(ext_offset)) { 1164 riscv_cpu_enable_named_feat(cpu, ext_offset); 1165 } 1166 1167 cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset); 1168 } 1169 1170 cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled); 1171 isa_ext_update_enabled(cpu, ext_offset, profile->enabled); 1172 } 1173 } 1174 1175 static void cpu_get_profile(Object *obj, Visitor *v, const char *name, 1176 void *opaque, Error **errp) 1177 { 1178 RISCVCPUProfile *profile = opaque; 1179 bool value = profile->enabled; 1180 1181 visit_type_bool(v, name, &value, errp); 1182 } 1183 1184 static void riscv_cpu_add_profiles(Object *cpu_obj) 1185 { 1186 for (int i = 0; riscv_profiles[i] != NULL; i++) { 1187 const RISCVCPUProfile *profile = riscv_profiles[i]; 1188 1189 object_property_add(cpu_obj, profile->name, "bool", 1190 cpu_get_profile, cpu_set_profile, 1191 NULL, (void *)profile); 1192 1193 /* 1194 * CPUs might enable a profile right from the start. 1195 * Enable its mandatory extensions right away in this 1196 * case. 1197 */ 1198 if (profile->enabled) { 1199 object_property_set_bool(cpu_obj, profile->name, true, NULL); 1200 } 1201 } 1202 } 1203 1204 static bool cpu_ext_is_deprecated(const char *ext_name) 1205 { 1206 return isupper(ext_name[0]); 1207 } 1208 1209 /* 1210 * String will be allocated in the heap. Caller is responsible 1211 * for freeing it. 1212 */ 1213 static char *cpu_ext_to_lower(const char *ext_name) 1214 { 1215 char *ret = g_malloc0(strlen(ext_name) + 1); 1216 1217 strcpy(ret, ext_name); 1218 ret[0] = tolower(ret[0]); 1219 1220 return ret; 1221 } 1222 1223 static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name, 1224 void *opaque, Error **errp) 1225 { 1226 const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque; 1227 RISCVCPU *cpu = RISCV_CPU(obj); 1228 bool vendor_cpu = riscv_cpu_is_vendor(obj); 1229 bool prev_val, value; 1230 1231 if (!visit_type_bool(v, name, &value, errp)) { 1232 return; 1233 } 1234 1235 if (cpu_ext_is_deprecated(multi_ext_cfg->name)) { 1236 g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name); 1237 1238 warn_report("CPU property '%s' is deprecated. Please use '%s' instead", 1239 multi_ext_cfg->name, lower); 1240 } 1241 1242 cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value); 1243 1244 prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset); 1245 1246 if (value == prev_val) { 1247 return; 1248 } 1249 1250 if (value && vendor_cpu) { 1251 g_autofree char *cpuname = riscv_cpu_get_name(cpu); 1252 error_setg(errp, "'%s' CPU does not allow enabling extensions", 1253 cpuname); 1254 return; 1255 } 1256 1257 if (value) { 1258 cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset); 1259 } 1260 1261 isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value); 1262 } 1263 1264 static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name, 1265 void *opaque, Error **errp) 1266 { 1267 const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque; 1268 bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset); 1269 1270 visit_type_bool(v, name, &value, errp); 1271 } 1272 1273 static void cpu_add_multi_ext_prop(Object *cpu_obj, 1274 const RISCVCPUMultiExtConfig *multi_cfg) 1275 { 1276 bool generic_cpu = riscv_cpu_is_generic(cpu_obj); 1277 bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name); 1278 1279 object_property_add(cpu_obj, multi_cfg->name, "bool", 1280 cpu_get_multi_ext_cfg, 1281 cpu_set_multi_ext_cfg, 1282 NULL, (void *)multi_cfg); 1283 1284 if (!generic_cpu || deprecated_ext) { 1285 return; 1286 } 1287 1288 /* 1289 * Set def val directly instead of using 1290 * object_property_set_bool() to save the set() 1291 * callback hash for user inputs. 1292 */ 1293 isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset, 1294 multi_cfg->enabled); 1295 } 1296 1297 static void riscv_cpu_add_multiext_prop_array(Object *obj, 1298 const RISCVCPUMultiExtConfig *array) 1299 { 1300 const RISCVCPUMultiExtConfig *prop; 1301 1302 g_assert(array); 1303 1304 for (prop = array; prop && prop->name; prop++) { 1305 cpu_add_multi_ext_prop(obj, prop); 1306 } 1307 } 1308 1309 /* 1310 * Add CPU properties with user-facing flags. 1311 * 1312 * This will overwrite existing env->misa_ext values with the 1313 * defaults set via riscv_cpu_add_misa_properties(). 1314 */ 1315 static void riscv_cpu_add_user_properties(Object *obj) 1316 { 1317 #ifndef CONFIG_USER_ONLY 1318 riscv_add_satp_mode_properties(obj); 1319 #endif 1320 1321 riscv_cpu_add_misa_properties(obj); 1322 1323 riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions); 1324 riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts); 1325 riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts); 1326 1327 riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts); 1328 1329 riscv_cpu_add_profiles(obj); 1330 1331 for (Property *prop = riscv_cpu_options; prop && prop->name; prop++) { 1332 qdev_property_add_static(DEVICE(obj), prop); 1333 } 1334 } 1335 1336 /* 1337 * The 'max' type CPU will have all possible ratified 1338 * non-vendor extensions enabled. 1339 */ 1340 static void riscv_init_max_cpu_extensions(Object *obj) 1341 { 1342 RISCVCPU *cpu = RISCV_CPU(obj); 1343 CPURISCVState *env = &cpu->env; 1344 const RISCVCPUMultiExtConfig *prop; 1345 1346 /* Enable RVG, RVJ and RVV that are disabled by default */ 1347 riscv_cpu_set_misa(env, env->misa_mxl, env->misa_ext | RVG | RVJ | RVV); 1348 1349 for (prop = riscv_cpu_extensions; prop && prop->name; prop++) { 1350 isa_ext_update_enabled(cpu, prop->offset, true); 1351 } 1352 1353 /* set vector version */ 1354 env->vext_ver = VEXT_VERSION_1_00_0; 1355 1356 /* Zfinx is not compatible with F. Disable it */ 1357 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false); 1358 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false); 1359 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false); 1360 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false); 1361 1362 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false); 1363 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false); 1364 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false); 1365 1366 if (env->misa_mxl != MXL_RV32) { 1367 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false); 1368 } 1369 } 1370 1371 static bool riscv_cpu_has_max_extensions(Object *cpu_obj) 1372 { 1373 return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL; 1374 } 1375 1376 static void riscv_tcg_cpu_instance_init(CPUState *cs) 1377 { 1378 RISCVCPU *cpu = RISCV_CPU(cs); 1379 Object *obj = OBJECT(cpu); 1380 1381 misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal); 1382 multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal); 1383 riscv_cpu_add_user_properties(obj); 1384 1385 if (riscv_cpu_has_max_extensions(obj)) { 1386 riscv_init_max_cpu_extensions(obj); 1387 } 1388 } 1389 1390 static void riscv_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc) 1391 { 1392 /* 1393 * All cpus use the same set of operations. 1394 */ 1395 cc->tcg_ops = &riscv_tcg_ops; 1396 } 1397 1398 static void riscv_tcg_cpu_class_init(CPUClass *cc) 1399 { 1400 cc->init_accel_cpu = riscv_tcg_cpu_init_ops; 1401 } 1402 1403 static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, void *data) 1404 { 1405 AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); 1406 1407 acc->cpu_class_init = riscv_tcg_cpu_class_init; 1408 acc->cpu_instance_init = riscv_tcg_cpu_instance_init; 1409 acc->cpu_target_realize = riscv_tcg_cpu_realize; 1410 } 1411 1412 static const TypeInfo riscv_tcg_cpu_accel_type_info = { 1413 .name = ACCEL_CPU_NAME("tcg"), 1414 1415 .parent = TYPE_ACCEL_CPU, 1416 .class_init = riscv_tcg_cpu_accel_class_init, 1417 .abstract = true, 1418 }; 1419 1420 static void riscv_tcg_cpu_accel_register_types(void) 1421 { 1422 type_register_static(&riscv_tcg_cpu_accel_type_info); 1423 } 1424 type_init(riscv_tcg_cpu_accel_register_types); 1425