/*
 * riscv TCG cpu class initialization
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "tcg-cpu.h"
#include "cpu.h"
#include "pmu.h"
#include "time_helper.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "hw/core/accel-cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tcg/tcg.h"

/* Hash that stores user set extensions */
static GHashTable *multi_ext_user_opts;
static GHashTable *misa_ext_user_opts;

static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
    return g_hash_table_contains(multi_ext_user_opts,
                                 GUINT_TO_POINTER(ext_offset));
}

static bool cpu_misa_ext_is_user_set(uint32_t misa_bit)
{
    return g_hash_table_contains(misa_ext_user_opts,
                                 GUINT_TO_POINTER(misa_bit));
}

static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value)
{
    g_hash_table_insert(multi_ext_user_opts, GUINT_TO_POINTER(ext_offset),
                        (gpointer)value);
}

static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value)
{
    g_hash_table_insert(misa_ext_user_opts, GUINT_TO_POINTER(bit),
                        (gpointer)value);
}

static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit,
                                     bool enabled)
{
    CPURISCVState *env = &cpu->env;

    if (enabled) {
        env->misa_ext |= bit;
        env->misa_ext_mask |= bit;
    } else {
        env->misa_ext &= ~bit;
        env->misa_ext_mask &= ~bit;
    }
}

static const char *cpu_priv_ver_to_str(int priv_ver)
{
    switch (priv_ver) {
    case PRIV_VERSION_1_10_0:
        return "v1.10.0";
    case PRIV_VERSION_1_11_0:
        return "v1.11.0";
    case PRIV_VERSION_1_12_0:
        return "v1.12.0";
    }

    g_assert_not_reached();
}

static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
}

static const TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

static int cpu_cfg_ext_get_min_version(uint32_t ext_offset)
{
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (edata->ext_enable_offset != ext_offset) {
            continue;
        }

        return edata->min_version;
    }

    g_assert_not_reached();
}

static const char *cpu_cfg_ext_get_name(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata->name != NULL; edata++) {
        if (edata->ext_enable_offset == ext_offset) {
            return edata->name;
        }
    }

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return feat->name;
        }
    }

    g_assert_not_reached();
}

static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return true;
        }
    }

    return false;
}

static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
{
    /*
     * All other named features are already enabled
     * in riscv_tcg_cpu_instance_init().
     */
    if (feat_offset == CPU_CFG_OFFSET(ext_zic64b)) {
        cpu->cfg.cbom_blocksize = 64;
        cpu->cfg.cbop_blocksize = 64;
        cpu->cfg.cboz_blocksize = 64;
    }
}

static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env,
                                        uint32_t ext_offset)
{
    int ext_priv_ver;

    if (env->priv_ver == PRIV_VERSION_LATEST) {
        return;
    }

    ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset);

    if (env->priv_ver < ext_priv_ver) {
        /*
         * Note: the 'priv_spec' command line option, if present,
         * will take precedence over this priv_ver bump.
         */
        env->priv_ver = ext_priv_ver;
    }
}

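/*
 * Auto-update an extension implied by another one: do nothing if the
 * user set the extension explicitly on the command line, and do not
 * enable it when the chosen priv spec version is older than the
 * extension's minimum version.
 */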
static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
                                    bool value)
{
    CPURISCVState *env = &cpu->env;
    bool prev_val = isa_ext_is_enabled(cpu, ext_offset);
    int min_version;

    if (prev_val == value) {
        return;
    }

    if (cpu_cfg_ext_is_user_set(ext_offset)) {
        return;
    }

    if (value && env->priv_ver != PRIV_VERSION_LATEST) {
        /* Do not enable it if priv_ver is older than min_version */
        min_version = cpu_cfg_ext_get_min_version(ext_offset);
        if (env->priv_ver < min_version) {
            return;
        }
    }

    isa_ext_update_enabled(cpu, ext_offset, value);
}

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    uint32_t vlen = cfg->vlenb << 3;

    if (vlen > RV_VLEN_MAX || vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }

    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
}

static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    const RISCVIsaExtData *edata;

    /* Force disable extensions if priv spec version does not match */
    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) &&
            (env->priv_ver < edata->min_version)) {
            /*
             * These two extensions are always enabled as they were supported
             * by QEMU before they were added as extensions in the ISA.
             */
            if (!strcmp(edata->name, "zicntr") ||
                !strcmp(edata->name, "zihpm")) {
                continue;
            }

            isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        edata->name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        edata->name);
#endif
        }
    }
}

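/*
 * Derive the named features from the current configuration: the priv
 * spec level flags and zic64b (all cache block sizes equal to 64 bytes
 * on a priv spec 1.12 or later hart).
 */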
static void riscv_cpu_update_named_features(RISCVCPU *cpu)
{
    if (cpu->env.priv_ver >= PRIV_VERSION_1_11_0) {
        cpu->cfg.has_priv_1_11 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_12_0) {
        cpu->cfg.has_priv_1_12 = true;
    }

    /* zic64b is 1.12 or later */
    cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
                          cpu->cfg.cbop_blocksize == 64 &&
                          cpu->cfg.cboz_blocksize == 64 &&
                          cpu->cfg.has_priv_1_12;
}

static void riscv_cpu_validate_g(RISCVCPU *cpu)
{
    const char *warn_msg = "RVG mandates disabled extension %s";
    uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD};
    bool send_warn = cpu_misa_ext_is_user_set(RVG);

    for (int i = 0; i < ARRAY_SIZE(g_misa_bits); i++) {
        uint32_t bit = g_misa_bits[i];

        if (riscv_has_ext(&cpu->env, bit)) {
            continue;
        }

        if (!cpu_misa_ext_is_user_set(bit)) {
            riscv_cpu_write_misa_bit(cpu, bit, true);
            continue;
        }

        if (send_warn) {
            warn_report(warn_msg, riscv_get_misa_ext_name(bit));
        }
    }

    if (!cpu->cfg.ext_zicsr) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) {
            cpu->cfg.ext_zicsr = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zicsr");
        }
    }

    if (!cpu->cfg.ext_zifencei) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) {
            cpu->cfg.ext_zifencei = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zifencei");
        }
    }
}

static void riscv_cpu_validate_b(RISCVCPU *cpu)
{
    const char *warn_msg = "RVB mandates disabled extension %s";

    if (!cpu->cfg.ext_zba) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zba))) {
            cpu->cfg.ext_zba = true;
        } else {
            warn_report(warn_msg, "zba");
        }
    }

    if (!cpu->cfg.ext_zbb) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbb))) {
            cpu->cfg.ext_zbb = true;
        } else {
            warn_report(warn_msg, "zbb");
        }
    }

    if (!cpu->cfg.ext_zbs) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbs))) {
            cpu->cfg.ext_zbs = true;
        } else {
            warn_report(warn_msg, "zbs");
        }
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    if (riscv_has_ext(env, RVG)) {
        riscv_cpu_validate_g(cpu);
    }

    if (riscv_has_ext(env, RVB)) {
        riscv_cpu_validate_b(cpu);
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zacas extension requires A extension");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfh) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zfhmin), true);
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        /* The V vector extension depends on the Zve64d extension */
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64d), true);
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        if (!riscv_has_ext(env, RVD)) {
            error_setg(errp, "Zve64d/V extensions require D extension");
            return;
        }
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64f), true);
    }

    /* The Zve64f extension depends on the Zve64x and Zve32f extensions */
    if (cpu->cfg.ext_zve64f) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64x), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve32f), true);
    }

    /* The Zve64x extension depends on the Zve32x extension */
    if (cpu->cfg.ext_zve64x) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve32x), true);
    }

    /* The Zve32f extension depends on the Zve32x extension */
    if (cpu->cfg.ext_zve32f) {
        if (!riscv_has_ext(env, RVF)) {
            error_setg(errp, "Zve32f/Zve64f extensions require F extension");
            return;
        }
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve32x), true);
    }

    if (cpu->cfg.ext_zvfh) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvfhmin), true);
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extension requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    /* Set the ISA extensions, checks should have happened above */
    if (cpu->cfg.ext_zhinx) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zhinxmin), true);
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zce) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);
        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
    }

    /* Zca, Zcd and Zcf have a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
        if (riscv_has_ext(env, RVD)) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
        }
    }

    if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    /*
     * Shorthand vector crypto extensions
     */
    if (cpu->cfg.ext_zvknc) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
    }

    if (cpu->cfg.ext_zvkng) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true);
    }

    if (cpu->cfg.ext_zvkn) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkned), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvknhb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), true);
    }

    if (cpu->cfg.ext_zvksc) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
    }

    if (cpu->cfg.ext_zvksg) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true);
    }

    if (cpu->cfg.ext_zvks) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksed), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksh), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), true);
    }

    if (cpu->cfg.ext_zvkt) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
    }

    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
         cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32x) {
        error_setg(errp,
                   "Vector crypto extensions require V or Zve* extensions");
        return;
    }

    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64x) {
        error_setg(
            errp,
            "Zvbc and Zvknhb extensions require V or Zve64x extensions");
        return;
    }

    if (cpu->cfg.ext_zk) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkn), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkr), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkt), true);
    }

    if (cpu->cfg.ext_zkn) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkne), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknd), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknh), true);
    }

    if (cpu->cfg.ext_zks) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksed), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksh), true);
    }

    if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) {
            error_setg(errp, "zicntr requires zicsr");
            return;
        }
        cpu->cfg.ext_zicntr = false;
    }

    if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) {
            error_setg(errp, "zihpm requires zicsr");
            return;
        }
        cpu->cfg.ext_zihpm = false;
    }

    if (!cpu->cfg.ext_zihpm) {
        cpu->cfg.pmu_mask = 0;
        cpu->pmu_avail_ctrs = 0;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

#ifndef CONFIG_USER_ONLY
static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
                                            RISCVCPUProfile *profile,
                                            bool send_warn)
{
    int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (profile->satp_mode > satp_max) {
        if (send_warn) {
            bool is_32bit = riscv_cpu_is_32bit(cpu);
            const char *req_satp = satp_mode_str(profile->satp_mode, is_32bit);
            const char *cur_satp = satp_mode_str(satp_max, is_32bit);

            warn_report("Profile %s requires satp mode %s, "
                        "but satp mode %s was set", profile->name,
                        req_satp, cur_satp);
        }

        return false;
    }

    return true;
}
#endif

static void riscv_cpu_validate_profile(RISCVCPU *cpu,
                                       RISCVCPUProfile *profile)
{
    CPURISCVState *env = &cpu->env;
    const char *warn_msg = "Profile %s mandates disabled extension %s";
    bool send_warn = profile->user_set && profile->enabled;
    bool parent_enabled, profile_impl = true;
    int i;

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        profile_impl = riscv_cpu_validate_profile_satp(cpu, profile,
                                                       send_warn);
    }
#endif

    if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
        profile->priv_spec != env->priv_ver) {
        profile_impl = false;

        if (send_warn) {
            warn_report("Profile %s requires priv spec %s, "
                        "but priv ver %s was set", profile->name,
                        cpu_priv_ver_to_str(profile->priv_spec),
                        cpu_priv_ver_to_str(env->priv_ver));
        }
    }

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (!riscv_has_ext(&cpu->env, bit)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            riscv_get_misa_ext_name(bit));
            }
        }
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        int ext_offset = profile->ext_offsets[i];

        if (!isa_ext_is_enabled(cpu, ext_offset)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            cpu_cfg_ext_get_name(ext_offset));
            }
        }
    }

    profile->enabled = profile_impl;

    if (profile->parent != NULL) {
        parent_enabled = object_property_get_bool(OBJECT(cpu),
                                                  profile->parent->name,
                                                  NULL);
        profile->enabled = profile->enabled && parent_enabled;
    }
}

static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        riscv_cpu_validate_profile(cpu, riscv_profiles[i]);
    }
}

void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_update_named_features(cpu);
    riscv_cpu_validate_profiles(cpu);

    if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: Smepmp requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

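/*
 * Build the per-CPU list of decoder functions: keep every decoder_table
 * entry whose guard function accepts the current CPU configuration.
 */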
void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
{
    GPtrArray *dynamic_decoders;
    dynamic_decoders = g_ptr_array_sized_new(decoder_table_size);
    for (size_t i = 0; i < decoder_table_size; ++i) {
        if (decoder_table[i].guard_func &&
            decoder_table[i].guard_func(&cpu->cfg)) {
            g_ptr_array_add(dynamic_decoders,
                            (gpointer)decoder_table[i].riscv_cpu_decode_fn);
        }
    }

    cpu->decoders = dynamic_decoders;
}

bool riscv_cpu_tcg_compatible(RISCVCPU *cpu)
{
    return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL;
}

static bool riscv_cpu_is_generic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> tcg_cpu_realize() (via accel_cpu_common_realize())
 */
static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (!riscv_cpu_tcg_compatible(cpu)) {
        g_autofree char *name = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU is not compatible with TCG acceleration",
                   name);
        return false;
    }

#ifndef CONFIG_USER_ONLY
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    tcg_cflags_set(CPU(cs), CF_PCREL);

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    if (cpu->cfg.pmu_mask) {
        riscv_pmu_init(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return false;
        }

        if (cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }

    /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
    }
#endif

    return true;
}

typedef struct RISCVCPUMisaExtConfig {
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;

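/*
 * QOM setter for a single-letter MISA extension property. Vendor CPUs
 * cannot have extensions enabled this way, and enabling RVH bumps the
 * priv spec version to 1.12.0 when needed.
 */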
static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    cpu_misa_ext_add_user_opt(misa_bit, value);

    prev_val = env->misa_ext & misa_bit;

    if (value == prev_val) {
        return;
    }

    if (value) {
        if (vendor_cpu) {
            g_autofree char *cpuname = riscv_cpu_get_name(cpu);
            error_setg(errp, "'%s' CPU does not allow enabling extensions",
                       cpuname);
            return;
        }

        if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) {
            /*
             * Note: the 'priv_spec' command line option, if present,
             * will take precedence over this priv_ver bump.
             */
            env->priv_ver = PRIV_VERSION_1_12_0;
        }
    }

    riscv_cpu_write_misa_bit(cpu, misa_bit, value);
}

static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVJ, false),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
    MISA_CFG(RVB, false),
};

/*
 * We do not support user choice tracking for MISA
 * extensions yet because, so far, we do not silently
 * change MISA bits during realize() (RVG enables MISA
 * bits but the user is warned about it).
 */
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    bool use_def_vals = riscv_cpu_is_generic(cpu_obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
        int bit = misa_cfg->misa_bit;
        const char *name = riscv_get_misa_ext_name(bit);
        const char *desc = riscv_get_misa_ext_description(bit);

        /* Check if KVM already created the property */
        if (object_property_find(cpu_obj, name)) {
            continue;
        }

        object_property_add(cpu_obj, name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, name, desc);
        if (use_def_vals) {
            riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit,
                                     misa_cfg->enabled);
        }
    }
}

static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;
    int i, ext_offset;

    if (riscv_cpu_is_vendor(obj)) {
        error_setg(errp, "Profile %s is not available for vendor CPUs",
                   profile->name);
        return;
    }

    if (cpu->env.misa_mxl != MXL_RV64) {
        error_setg(errp, "Profile %s only available for 64 bit CPUs",
                   profile->name);
        return;
    }

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    profile->user_set = true;
    profile->enabled = value;

    if (profile->parent != NULL) {
        object_property_set_bool(obj, profile->parent->name,
                                 profile->enabled, NULL);
    }

    if (profile->enabled) {
        cpu->env.priv_ver = profile->priv_spec;
    }

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        object_property_set_bool(obj, "mmu", true, NULL);
        const char *satp_prop = satp_mode_str(profile->satp_mode,
                                              riscv_cpu_is_32bit(cpu));
        object_property_set_bool(obj, satp_prop, profile->enabled, NULL);
    }
#endif

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (bit == RVI && !profile->enabled) {
            /*
             * Disabling profiles will not disable the base
             * ISA RV64I.
             */
            continue;
        }

        cpu_misa_ext_add_user_opt(bit, profile->enabled);
        riscv_cpu_write_misa_bit(cpu, bit, profile->enabled);
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        ext_offset = profile->ext_offsets[i];

        if (profile->enabled) {
            if (cpu_cfg_offset_is_named_feat(ext_offset)) {
                riscv_cpu_enable_named_feat(cpu, ext_offset);
            }

            cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset);
        }

        cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled);
        isa_ext_update_enabled(cpu, ext_offset, profile->enabled);
    }
}

static void cpu_get_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    bool value = profile->enabled;

    visit_type_bool(v, name, &value, errp);
}

static void riscv_cpu_add_profiles(Object *cpu_obj)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        const RISCVCPUProfile *profile = riscv_profiles[i];

        object_property_add(cpu_obj, profile->name, "bool",
                            cpu_get_profile, cpu_set_profile,
                            NULL, (void *)profile);

        /*
         * CPUs might enable a profile right from the start.
         * Enable its mandatory extensions right away in this
         * case.
         */
        if (profile->enabled) {
            object_property_set_bool(cpu_obj, profile->name, true, NULL);
        }
    }
}

static bool cpu_ext_is_deprecated(const char *ext_name)
{
    return isupper(ext_name[0]);
}

/*
 * String will be allocated in the heap. Caller is responsible
 * for freeing it.
 */
static char *cpu_ext_to_lower(const char *ext_name)
{
    char *ret = g_malloc0(strlen(ext_name) + 1);

    strcpy(ret, ext_name);
    ret[0] = tolower(ret[0]);

    return ret;
}

static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (cpu_ext_is_deprecated(multi_ext_cfg->name)) {
        g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name);

        warn_report("CPU property '%s' is deprecated. Please use '%s' instead",
                    multi_ext_cfg->name, lower);
    }

    cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value);

    prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset);

    if (value == prev_val) {
        return;
    }

    if (value && vendor_cpu) {
        g_autofree char *cpuname = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU does not allow enabling extensions",
                   cpuname);
        return;
    }

    if (value) {
        cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset);
    }

    isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value);
}

static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_add_multi_ext_prop(Object *cpu_obj,
                                   const RISCVCPUMultiExtConfig *multi_cfg)
{
    bool generic_cpu = riscv_cpu_is_generic(cpu_obj);
    bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name);

    object_property_add(cpu_obj, multi_cfg->name, "bool",
                        cpu_get_multi_ext_cfg,
                        cpu_set_multi_ext_cfg,
                        NULL, (void *)multi_cfg);

    if (!generic_cpu || deprecated_ext) {
        return;
    }

    /*
     * Set def val directly instead of using
     * object_property_set_bool() to save the set()
     * callback hash for user inputs.
     */
    isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset,
                           multi_cfg->enabled);
}

static void riscv_cpu_add_multiext_prop_array(Object *obj,
                                        const RISCVCPUMultiExtConfig *array)
{
    const RISCVCPUMultiExtConfig *prop;

    g_assert(array);

    for (prop = array; prop && prop->name; prop++) {
        cpu_add_multi_ext_prop(obj, prop);
    }
}

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);
#endif

    riscv_cpu_add_misa_properties(obj);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts);

    riscv_cpu_add_profiles(obj);
}

/*
 * The 'max' type CPU will have all possible ratified
 * non-vendor extensions enabled.
 */
static void riscv_init_max_cpu_extensions(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    const RISCVCPUMultiExtConfig *prop;

    /* Enable RVB, RVG, RVJ and RVV that are disabled by default */
    riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVJ | RVV);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
        isa_ext_update_enabled(cpu, prop->offset, true);
    }

    /*
     * Some extensions can't be added without backward compatibility concerns.
     * Disable those, the user can still opt in to them on the command line.
     */
    cpu->cfg.ext_svade = false;

    /* set vector version */
    env->vext_ver = VEXT_VERSION_1_00_0;

    /* Zfinx is not compatible with F. Disable it */
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false);

    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false);

    if (env->misa_mxl != MXL_RV32) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
    }
}

static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL;
}

static void riscv_tcg_cpu_instance_init(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    Object *obj = OBJECT(cpu);

    misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
    multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
    riscv_cpu_add_user_properties(obj);

    if (riscv_cpu_has_max_extensions(obj)) {
        riscv_init_max_cpu_extensions(obj);
    }
}

static void riscv_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
{
    /*
     * All cpus use the same set of operations.
     */
    cc->tcg_ops = &riscv_tcg_ops;
}

static void riscv_tcg_cpu_class_init(CPUClass *cc)
{
    cc->init_accel_cpu = riscv_tcg_cpu_init_ops;
}

static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_class_init = riscv_tcg_cpu_class_init;
    acc->cpu_instance_init = riscv_tcg_cpu_instance_init;
    acc->cpu_target_realize = riscv_tcg_cpu_realize;
}

static const TypeInfo riscv_tcg_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("tcg"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = riscv_tcg_cpu_accel_class_init,
    .abstract = true,
};

static void riscv_tcg_cpu_accel_register_types(void)
{
    type_register_static(&riscv_tcg_cpu_accel_type_info);
}
type_init(riscv_tcg_cpu_accel_register_types);