/*
 * riscv TCG cpu class initialization
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "tcg-cpu.h"
#include "cpu.h"
#include "pmu.h"
#include "time_helper.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "hw/core/accel-cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tcg/tcg.h"

/* Hash that stores user set extensions */
static GHashTable *multi_ext_user_opts;

static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
    return g_hash_table_contains(multi_ext_user_opts,
                                 GUINT_TO_POINTER(ext_offset));
}

static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
}

static const struct TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

static int cpu_cfg_ext_get_min_version(uint32_t ext_offset)
{
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (edata->ext_enable_offset != ext_offset) {
            continue;
        }

        return edata->min_version;
    }

    g_assert_not_reached();
}

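/*
 * Enable or disable an extension that is implied by another one, but only
 * when the user did not set it explicitly and, when enabling, only if the
 * hart's privilege spec version is recent enough for it.
 */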
static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
                                    bool value)
{
    CPURISCVState *env = &cpu->env;
    bool prev_val = isa_ext_is_enabled(cpu, ext_offset);
    int min_version;

    if (prev_val == value) {
        return;
    }

    if (cpu_cfg_ext_is_user_set(ext_offset)) {
        return;
    }

    if (value && env->priv_ver != PRIV_VERSION_LATEST) {
        /* Do not enable it if priv_ver is older than min_version */
        min_version = cpu_cfg_ext_get_min_version(ext_offset);
        if (env->priv_ver < min_version) {
            return;
        }
    }

    isa_ext_update_enabled(cpu, ext_offset, value);
}

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPUClass *cc = CPU_CLASS(mcc);
    CPURISCVState *env = &cpu->env;

    /* Validate that MISA_MXL is set properly. */
    switch (env->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }

    if (env->misa_mxl_max != env->misa_mxl) {
        error_setg(errp, "misa_mxl_max must be equal to misa_mxl");
        return;
    }
}

static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    int priv_version = -1;

    if (cpu->cfg.priv_spec) {
        if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
            priv_version = PRIV_VERSION_1_12_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
            priv_version = PRIV_VERSION_1_11_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
            priv_version = PRIV_VERSION_1_10_0;
        } else {
            error_setg(errp,
                       "Unsupported privilege spec version '%s'",
                       cpu->cfg.priv_spec);
            return;
        }

        env->priv_ver = priv_version;
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    if (!is_power_of_2(cfg->vlen)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }

    if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }

    if (!is_power_of_2(cfg->elen)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }

    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }

    if (cfg->vext_spec) {
        if (!g_strcmp0(cfg->vext_spec, "v1.0")) {
            env->vext_ver = VEXT_VERSION_1_00_0;
        } else {
            error_setg(errp, "Unsupported vector spec version '%s'",
                       cfg->vext_spec);
            return;
        }
    } else if (env->vext_ver == 0) {
        qemu_log("vector version is not specified, "
                 "use the default value v1.0\n");

        env->vext_ver = VEXT_VERSION_1_00_0;
    }
}

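/*
 * Walk the ISA extension table and force-disable every extension whose
 * minimum required privilege spec version is newer than the hart's
 * configured priv_ver, warning the user about each one.
 */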
static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    const RISCVIsaExtData *edata;

    /* Force disable extensions if priv spec version does not match */
    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) &&
            (env->priv_ver < edata->min_version)) {
            isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        edata->name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        edata->name);
#endif
        }
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    /* Do some ISA extension error checking */
    if (riscv_has_ext(env, RVG) &&
        !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) &&
          riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) &&
          riscv_has_ext(env, RVD) &&
          cpu->cfg.ext_zicsr && cpu->cfg.ext_zifencei)) {

        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr)) &&
            !cpu->cfg.ext_zicsr) {
            error_setg(errp, "RVG requires Zicsr but user set Zicsr to false");
            return;
        }

        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei)) &&
            !cpu->cfg.ext_zifencei) {
            error_setg(errp, "RVG requires Zifencei but user set "
                       "Zifencei to false");
            return;
        }

        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zicsr), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zifencei), true);

        env->misa_ext |= RVI | RVM | RVA | RVF | RVD;
        env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD;
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfh) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zfhmin), true);
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

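    /*
     * Vector handling: validate the V configuration first, then resolve
     * the Zve64d -> Zve64f -> Zve32f dependency chain and its F/D
     * requirements.
     */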
    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        /* The V vector extension depends on the Zve64d extension */
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64d), true);
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64f), true);
    }

    /* The Zve64f extension depends on the Zve32f extension */
    if (cpu->cfg.ext_zve64f) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve32f), true);
    }

    if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) {
        error_setg(errp, "Zve64d/V extensions require D extension");
        return;
    }

    if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zve32f/Zve64f extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zvfh) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvfhmin), true);
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extension requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) {
        error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    /* Set the ISA extensions, checks should have happened above */
    if (cpu->cfg.ext_zhinx) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zhinxmin), true);
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

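    /*
     * Zce is an umbrella extension: expand it into the individual Zc*
     * members (plus Zcf on RV32 with F) before applying the Zc*
     * dependency checks below.
     */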
    if (cpu->cfg.ext_zce) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
    }

    /* zca, zcd and zcf have a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
        if (riscv_has_ext(env, RVD)) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
        }
    }

    if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    /*
     * Shorthand vector crypto extensions
     */
    if (cpu->cfg.ext_zvknc) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
    }

    if (cpu->cfg.ext_zvkng) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true);
    }

    if (cpu->cfg.ext_zvkn) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkned), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvknhb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), true);
    }

    if (cpu->cfg.ext_zvksc) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
    }

    if (cpu->cfg.ext_zvksg) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true);
    }

    if (cpu->cfg.ext_zvks) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksed), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksh), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), true);
    }

    if (cpu->cfg.ext_zvkt) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
    }

    /*
     * In principle Zve*x would also suffice here, were they supported
     * in qemu
     */
    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
         cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
        error_setg(errp,
                   "Vector crypto extensions require V or Zve* extensions");
        return;
    }

    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64f) {
        error_setg(
            errp,
            "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions");
        return;
    }

    if (cpu->cfg.ext_zk) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkn), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkr), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkt), true);
    }

    if (cpu->cfg.ext_zkn) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkne), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknd), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknh), true);
    }

    if (cpu->cfg.ext_zks) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksed), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksh), true);
    }

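    /*
     * Zicntr and Zihpm only make sense with Zicsr: error out if the user
     * explicitly enabled them without Zicsr, otherwise drop them quietly.
     */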
    if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) {
            error_setg(errp, "zicntr requires zicsr");
            return;
        }
        cpu->cfg.ext_zicntr = false;
    }

    if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) {
            error_setg(errp, "zihpm requires zicsr");
            return;
        }
        cpu->cfg.ext_zihpm = false;
    }

    if (!cpu->cfg.ext_zihpm) {
        cpu->cfg.pmu_num = 0;
        cpu->pmu_avail_ctrs = 0;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    riscv_cpu_validate_priv_spec(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: Smepmp requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

bool riscv_cpu_tcg_compatible(RISCVCPU *cpu)
{
    return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL;
}

static bool riscv_cpu_is_generic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> tcg_cpu_realize() (via accel_cpu_common_realize())
 */
static bool tcg_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    Error *local_err = NULL;

    if (!riscv_cpu_tcg_compatible(cpu)) {
        g_autofree char *name = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU is not compatible with TCG acceleration",
                   name);
        return false;
    }

    riscv_cpu_validate_misa_mxl(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return false;
    }

#ifndef CONFIG_USER_ONLY
    CPURISCVState *env = &cpu->env;

    CPU(cs)->tcg_cflags |= CF_PCREL;

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    if (cpu->cfg.pmu_num) {
        if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }

    /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
    }
#endif

    return true;
}

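/*
 * Single-letter (MISA) extensions such as RVA, RVC or RVH are exposed as
 * boolean QOM properties; the setter below flips the corresponding bit in
 * env->misa_ext and env->misa_ext_mask.
 */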
typedef struct RISCVCPUMisaExtConfig {
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;

static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool generic_cpu = riscv_cpu_is_generic(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    prev_val = env->misa_ext & misa_bit;

    if (value == prev_val) {
        return;
    }

    if (value) {
        if (!generic_cpu) {
            g_autofree char *cpuname = riscv_cpu_get_name(cpu);
            error_setg(errp, "'%s' CPU does not allow enabling extensions",
                       cpuname);
            return;
        }

        env->misa_ext |= misa_bit;
        env->misa_ext_mask |= misa_bit;
    } else {
        env->misa_ext &= ~misa_bit;
        env->misa_ext_mask &= ~misa_bit;
    }
}

static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVJ, false),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
};

/*
 * We do not support user choice tracking for MISA
 * extensions yet because, so far, we do not silently
 * change MISA bits during realize() (RVG enables MISA
 * bits but the user is warned about it).
 */
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    bool use_def_vals = riscv_cpu_is_generic(cpu_obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
        int bit = misa_cfg->misa_bit;
        const char *name = riscv_get_misa_ext_name(bit);
        const char *desc = riscv_get_misa_ext_description(bit);

        /* Check if KVM already created the property */
        if (object_property_find(cpu_obj, name)) {
            continue;
        }

        object_property_add(cpu_obj, name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, name, desc);
        if (use_def_vals) {
            object_property_set_bool(cpu_obj, name, misa_cfg->enabled, NULL);
        }
    }
}

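/*
 * Deprecated multi-letter extension properties keep their legacy
 * capitalized spelling; a capital first letter is what marks a
 * property name as deprecated.
 */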
822 */ 823 static char *cpu_ext_to_lower(const char *ext_name) 824 { 825 char *ret = g_malloc0(strlen(ext_name) + 1); 826 827 strcpy(ret, ext_name); 828 ret[0] = tolower(ret[0]); 829 830 return ret; 831 } 832 833 static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name, 834 void *opaque, Error **errp) 835 { 836 const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque; 837 RISCVCPU *cpu = RISCV_CPU(obj); 838 bool generic_cpu = riscv_cpu_is_generic(obj); 839 bool prev_val, value; 840 841 if (!visit_type_bool(v, name, &value, errp)) { 842 return; 843 } 844 845 if (cpu_ext_is_deprecated(multi_ext_cfg->name)) { 846 g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name); 847 848 warn_report("CPU property '%s' is deprecated. Please use '%s' instead", 849 multi_ext_cfg->name, lower); 850 } 851 852 g_hash_table_insert(multi_ext_user_opts, 853 GUINT_TO_POINTER(multi_ext_cfg->offset), 854 (gpointer)value); 855 856 prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset); 857 858 if (value == prev_val) { 859 return; 860 } 861 862 if (value && !generic_cpu) { 863 g_autofree char *cpuname = riscv_cpu_get_name(cpu); 864 error_setg(errp, "'%s' CPU does not allow enabling extensions", 865 cpuname); 866 return; 867 } 868 869 isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value); 870 } 871 872 static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name, 873 void *opaque, Error **errp) 874 { 875 const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque; 876 bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset); 877 878 visit_type_bool(v, name, &value, errp); 879 } 880 881 static void cpu_add_multi_ext_prop(Object *cpu_obj, 882 const RISCVCPUMultiExtConfig *multi_cfg) 883 { 884 bool generic_cpu = riscv_cpu_is_generic(cpu_obj); 885 bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name); 886 887 object_property_add(cpu_obj, multi_cfg->name, "bool", 888 cpu_get_multi_ext_cfg, 889 cpu_set_multi_ext_cfg, 890 NULL, (void *)multi_cfg); 891 892 if (!generic_cpu || deprecated_ext) { 893 return; 894 } 895 896 /* 897 * Set def val directly instead of using 898 * object_property_set_bool() to save the set() 899 * callback hash for user inputs. 900 */ 901 isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset, 902 multi_cfg->enabled); 903 } 904 905 static void riscv_cpu_add_multiext_prop_array(Object *obj, 906 const RISCVCPUMultiExtConfig *array) 907 { 908 const RISCVCPUMultiExtConfig *prop; 909 910 g_assert(array); 911 912 for (prop = array; prop && prop->name; prop++) { 913 cpu_add_multi_ext_prop(obj, prop); 914 } 915 } 916 917 /* 918 * Add CPU properties with user-facing flags. 919 * 920 * This will overwrite existing env->misa_ext values with the 921 * defaults set via riscv_cpu_add_misa_properties(). 922 */ 923 static void riscv_cpu_add_user_properties(Object *obj) 924 { 925 #ifndef CONFIG_USER_ONLY 926 riscv_add_satp_mode_properties(obj); 927 #endif 928 929 riscv_cpu_add_misa_properties(obj); 930 931 riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions); 932 riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts); 933 riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts); 934 935 riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts); 936 937 for (Property *prop = riscv_cpu_options; prop && prop->name; prop++) { 938 qdev_property_add_static(DEVICE(obj), prop); 939 } 940 } 941 942 /* 943 * The 'max' type CPU will have all possible ratified 944 * non-vendor extensions enabled. 
945 */ 946 static void riscv_init_max_cpu_extensions(Object *obj) 947 { 948 RISCVCPU *cpu = RISCV_CPU(obj); 949 CPURISCVState *env = &cpu->env; 950 const RISCVCPUMultiExtConfig *prop; 951 952 /* Enable RVG, RVJ and RVV that are disabled by default */ 953 riscv_cpu_set_misa(env, env->misa_mxl, env->misa_ext | RVG | RVJ | RVV); 954 955 for (prop = riscv_cpu_extensions; prop && prop->name; prop++) { 956 isa_ext_update_enabled(cpu, prop->offset, true); 957 } 958 959 /* set vector version */ 960 env->vext_ver = VEXT_VERSION_1_00_0; 961 962 /* Zfinx is not compatible with F. Disable it */ 963 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false); 964 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false); 965 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false); 966 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false); 967 968 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false); 969 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false); 970 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false); 971 972 if (env->misa_mxl != MXL_RV32) { 973 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false); 974 } 975 } 976 977 static bool riscv_cpu_has_max_extensions(Object *cpu_obj) 978 { 979 return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL; 980 } 981 982 static void tcg_cpu_instance_init(CPUState *cs) 983 { 984 RISCVCPU *cpu = RISCV_CPU(cs); 985 Object *obj = OBJECT(cpu); 986 987 multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal); 988 riscv_cpu_add_user_properties(obj); 989 990 if (riscv_cpu_has_max_extensions(obj)) { 991 riscv_init_max_cpu_extensions(obj); 992 } 993 } 994 995 static void tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc) 996 { 997 /* 998 * All cpus use the same set of operations. 999 */ 1000 cc->tcg_ops = &riscv_tcg_ops; 1001 } 1002 1003 static void tcg_cpu_class_init(CPUClass *cc) 1004 { 1005 cc->init_accel_cpu = tcg_cpu_init_ops; 1006 } 1007 1008 static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data) 1009 { 1010 AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); 1011 1012 acc->cpu_class_init = tcg_cpu_class_init; 1013 acc->cpu_instance_init = tcg_cpu_instance_init; 1014 acc->cpu_target_realize = tcg_cpu_realize; 1015 } 1016 1017 static const TypeInfo tcg_cpu_accel_type_info = { 1018 .name = ACCEL_CPU_NAME("tcg"), 1019 1020 .parent = TYPE_ACCEL_CPU, 1021 .class_init = tcg_cpu_accel_class_init, 1022 .abstract = true, 1023 }; 1024 1025 static void tcg_cpu_accel_register_types(void) 1026 { 1027 type_register_static(&tcg_cpu_accel_type_info); 1028 } 1029 type_init(tcg_cpu_accel_register_types); 1030