1 /* 2 * riscv TCG cpu class initialization 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2 or later, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * 16 * You should have received a copy of the GNU General Public License along with 17 * this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "exec/exec-all.h" 22 #include "tcg-cpu.h" 23 #include "cpu.h" 24 #include "pmu.h" 25 #include "time_helper.h" 26 #include "qapi/error.h" 27 #include "qapi/visitor.h" 28 #include "qemu/accel.h" 29 #include "qemu/error-report.h" 30 #include "qemu/log.h" 31 #include "hw/core/accel-cpu.h" 32 #include "hw/core/tcg-cpu-ops.h" 33 #include "tcg/tcg.h" 34 35 /* Hash that stores user set extensions */ 36 static GHashTable *multi_ext_user_opts; 37 38 static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset) 39 { 40 return g_hash_table_contains(multi_ext_user_opts, 41 GUINT_TO_POINTER(ext_offset)); 42 } 43 44 static void riscv_cpu_synchronize_from_tb(CPUState *cs, 45 const TranslationBlock *tb) 46 { 47 if (!(tb_cflags(tb) & CF_PCREL)) { 48 RISCVCPU *cpu = RISCV_CPU(cs); 49 CPURISCVState *env = &cpu->env; 50 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL); 51 52 tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); 53 54 if (xl == MXL_RV32) { 55 env->pc = (int32_t) tb->pc; 56 } else { 57 env->pc = tb->pc; 58 } 59 } 60 } 61 62 static void riscv_restore_state_to_opc(CPUState *cs, 63 const TranslationBlock *tb, 64 const uint64_t *data) 65 { 66 RISCVCPU *cpu = RISCV_CPU(cs); 67 CPURISCVState *env = &cpu->env; 68 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL); 69 target_ulong pc; 70 71 if (tb_cflags(tb) & CF_PCREL) { 72 pc = (env->pc & TARGET_PAGE_MASK) | data[0]; 73 } else { 74 pc = data[0]; 75 } 76 77 if (xl == MXL_RV32) { 78 env->pc = (int32_t)pc; 79 } else { 80 env->pc = pc; 81 } 82 env->bins = data[1]; 83 } 84 85 static const struct TCGCPUOps riscv_tcg_ops = { 86 .initialize = riscv_translate_init, 87 .synchronize_from_tb = riscv_cpu_synchronize_from_tb, 88 .restore_state_to_opc = riscv_restore_state_to_opc, 89 90 #ifndef CONFIG_USER_ONLY 91 .tlb_fill = riscv_cpu_tlb_fill, 92 .cpu_exec_interrupt = riscv_cpu_exec_interrupt, 93 .do_interrupt = riscv_cpu_do_interrupt, 94 .do_transaction_failed = riscv_cpu_do_transaction_failed, 95 .do_unaligned_access = riscv_cpu_do_unaligned_access, 96 .debug_excp_handler = riscv_cpu_debug_excp_handler, 97 .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint, 98 .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint, 99 #endif /* !CONFIG_USER_ONLY */ 100 }; 101 102 static int cpu_cfg_ext_get_min_version(uint32_t ext_offset) 103 { 104 const RISCVIsaExtData *edata; 105 106 for (edata = isa_edata_arr; edata && edata->name; edata++) { 107 if (edata->ext_enable_offset != ext_offset) { 108 continue; 109 } 110 111 return edata->min_version; 112 } 113 114 g_assert_not_reached(); 115 } 116 117 static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset, 118 bool value) 119 { 120 CPURISCVState *env = &cpu->env; 121 bool prev_val = isa_ext_is_enabled(cpu, ext_offset); 122 int min_version; 123 124 if (prev_val == value) { 125 return; 126 } 127 128 if (cpu_cfg_ext_is_user_set(ext_offset)) { 129 return; 130 } 131 132 if (value && env->priv_ver != PRIV_VERSION_LATEST) { 133 /* Do not enable it if priv_ver is older than min_version */ 134 min_version = cpu_cfg_ext_get_min_version(ext_offset); 135 if (env->priv_ver < min_version) { 136 return; 137 } 138 } 139 140 isa_ext_update_enabled(cpu, ext_offset, value); 141 } 142 143 static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp) 144 { 145 if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) { 146 error_setg(errp, "H extension requires priv spec 1.12.0"); 147 return; 148 } 149 } 150 151 static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp) 152 { 153 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 154 CPUClass *cc = CPU_CLASS(mcc); 155 CPURISCVState *env = &cpu->env; 156 157 /* Validate that MISA_MXL is set properly. */ 158 switch (env->misa_mxl_max) { 159 #ifdef TARGET_RISCV64 160 case MXL_RV64: 161 case MXL_RV128: 162 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; 163 break; 164 #endif 165 case MXL_RV32: 166 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; 167 break; 168 default: 169 g_assert_not_reached(); 170 } 171 172 if (env->misa_mxl_max != env->misa_mxl) { 173 error_setg(errp, "misa_mxl_max must be equal to misa_mxl"); 174 return; 175 } 176 } 177 178 static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp) 179 { 180 CPURISCVState *env = &cpu->env; 181 int priv_version = -1; 182 183 if (cpu->cfg.priv_spec) { 184 if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) { 185 priv_version = PRIV_VERSION_1_12_0; 186 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) { 187 priv_version = PRIV_VERSION_1_11_0; 188 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) { 189 priv_version = PRIV_VERSION_1_10_0; 190 } else { 191 error_setg(errp, 192 "Unsupported privilege spec version '%s'", 193 cpu->cfg.priv_spec); 194 return; 195 } 196 197 env->priv_ver = priv_version; 198 } 199 } 200 201 static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg, 202 Error **errp) 203 { 204 if (!is_power_of_2(cfg->vlen)) { 205 error_setg(errp, "Vector extension VLEN must be power of 2"); 206 return; 207 } 208 209 if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) { 210 error_setg(errp, 211 "Vector extension implementation only supports VLEN " 212 "in the range [128, %d]", RV_VLEN_MAX); 213 return; 214 } 215 216 if (!is_power_of_2(cfg->elen)) { 217 error_setg(errp, "Vector extension ELEN must be power of 2"); 218 return; 219 } 220 221 if (cfg->elen > 64 || cfg->elen < 8) { 222 error_setg(errp, 223 "Vector extension implementation only supports ELEN " 224 "in the range [8, 64]"); 225 return; 226 } 227 228 if (cfg->vext_spec) { 229 if (!g_strcmp0(cfg->vext_spec, "v1.0")) { 230 env->vext_ver = VEXT_VERSION_1_00_0; 231 } else { 232 error_setg(errp, "Unsupported vector spec version '%s'", 233 cfg->vext_spec); 234 return; 235 } 236 } else if (env->vext_ver == 0) { 237 qemu_log("vector version is not specified, " 238 "use the default value v1.0\n"); 239 240 env->vext_ver = VEXT_VERSION_1_00_0; 241 } 242 } 243 244 static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu) 245 { 246 CPURISCVState *env = &cpu->env; 247 const RISCVIsaExtData *edata; 248 249 /* Force disable extensions if priv spec version does not match */ 250 for (edata = isa_edata_arr; edata && edata->name; edata++) { 251 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) && 252 (env->priv_ver < edata->min_version)) { 253 isa_ext_update_enabled(cpu, edata->ext_enable_offset, false); 254 #ifndef CONFIG_USER_ONLY 255 warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx 256 " because privilege spec version does not match", 257 edata->name, env->mhartid); 258 #else 259 warn_report("disabling %s extension because " 260 "privilege spec version does not match", 261 edata->name); 262 #endif 263 } 264 } 265 } 266 267 /* 268 * Check consistency between chosen extensions while setting 269 * cpu->cfg accordingly. 270 */ 271 void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) 272 { 273 CPURISCVState *env = &cpu->env; 274 Error *local_err = NULL; 275 276 /* Do some ISA extension error checking */ 277 if (riscv_has_ext(env, RVG) && 278 !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) && 279 riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) && 280 riscv_has_ext(env, RVD) && 281 cpu->cfg.ext_zicsr && cpu->cfg.ext_zifencei)) { 282 283 if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr)) && 284 !cpu->cfg.ext_zicsr) { 285 error_setg(errp, "RVG requires Zicsr but user set Zicsr to false"); 286 return; 287 } 288 289 if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei)) && 290 !cpu->cfg.ext_zifencei) { 291 error_setg(errp, "RVG requires Zifencei but user set " 292 "Zifencei to false"); 293 return; 294 } 295 296 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zicsr), true); 297 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zifencei), true); 298 299 env->misa_ext |= RVI | RVM | RVA | RVF | RVD; 300 env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD; 301 } 302 303 if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) { 304 error_setg(errp, 305 "I and E extensions are incompatible"); 306 return; 307 } 308 309 if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) { 310 error_setg(errp, 311 "Either I or E extension must be set"); 312 return; 313 } 314 315 if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) { 316 error_setg(errp, 317 "Setting S extension without U extension is illegal"); 318 return; 319 } 320 321 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) { 322 error_setg(errp, 323 "H depends on an I base integer ISA with 32 x registers"); 324 return; 325 } 326 327 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) { 328 error_setg(errp, "H extension implicitly requires S-mode"); 329 return; 330 } 331 332 if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) { 333 error_setg(errp, "F extension requires Zicsr"); 334 return; 335 } 336 337 if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) { 338 error_setg(errp, "Zawrs extension requires A extension"); 339 return; 340 } 341 342 if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) { 343 error_setg(errp, "Zfa extension requires F extension"); 344 return; 345 } 346 347 if (cpu->cfg.ext_zfh) { 348 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zfhmin), true); 349 } 350 351 if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) { 352 error_setg(errp, "Zfh/Zfhmin extensions require F extension"); 353 return; 354 } 355 356 if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) { 357 error_setg(errp, "Zfbfmin extension depends on F extension"); 358 return; 359 } 360 361 if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) { 362 error_setg(errp, "D extension requires F extension"); 363 return; 364 } 365 366 if (riscv_has_ext(env, RVV)) { 367 riscv_cpu_validate_v(env, &cpu->cfg, &local_err); 368 if (local_err != NULL) { 369 error_propagate(errp, local_err); 370 return; 371 } 372 373 /* The V vector extension depends on the Zve64d extension */ 374 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64d), true); 375 } 376 377 /* The Zve64d extension depends on the Zve64f extension */ 378 if (cpu->cfg.ext_zve64d) { 379 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64f), true); 380 } 381 382 /* The Zve64f extension depends on the Zve32f extension */ 383 if (cpu->cfg.ext_zve64f) { 384 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve32f), true); 385 } 386 387 if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) { 388 error_setg(errp, "Zve64d/V extensions require D extension"); 389 return; 390 } 391 392 if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) { 393 error_setg(errp, "Zve32f/Zve64f extensions require F extension"); 394 return; 395 } 396 397 if (cpu->cfg.ext_zvfh) { 398 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvfhmin), true); 399 } 400 401 if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) { 402 error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension"); 403 return; 404 } 405 406 if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) { 407 error_setg(errp, "Zvfh extensions requires Zfhmin extension"); 408 return; 409 } 410 411 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) { 412 error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension"); 413 return; 414 } 415 416 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) { 417 error_setg(errp, "Zvfbfmin extension depends on Zve32f extension"); 418 return; 419 } 420 421 if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) { 422 error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension"); 423 return; 424 } 425 426 /* Set the ISA extensions, checks should have happened above */ 427 if (cpu->cfg.ext_zhinx) { 428 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true); 429 } 430 431 if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) { 432 error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx"); 433 return; 434 } 435 436 if (cpu->cfg.ext_zfinx) { 437 if (!cpu->cfg.ext_zicsr) { 438 error_setg(errp, "Zfinx extension requires Zicsr"); 439 return; 440 } 441 if (riscv_has_ext(env, RVF)) { 442 error_setg(errp, 443 "Zfinx cannot be supported together with F extension"); 444 return; 445 } 446 } 447 448 if (cpu->cfg.ext_zce) { 449 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true); 450 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true); 451 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true); 452 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true); 453 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 454 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true); 455 } 456 } 457 458 /* zca, zcd and zcf has a PRIV 1.12.0 restriction */ 459 if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) { 460 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true); 461 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 462 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true); 463 } 464 if (riscv_has_ext(env, RVD)) { 465 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true); 466 } 467 } 468 469 if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { 470 error_setg(errp, "Zcf extension is only relevant to RV32"); 471 return; 472 } 473 474 if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) { 475 error_setg(errp, "Zcf extension requires F extension"); 476 return; 477 } 478 479 if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) { 480 error_setg(errp, "Zcd extension requires D extension"); 481 return; 482 } 483 484 if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb || 485 cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) { 486 error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca " 487 "extension"); 488 return; 489 } 490 491 if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) { 492 error_setg(errp, "Zcmp/Zcmt extensions are incompatible with " 493 "Zcd extension"); 494 return; 495 } 496 497 if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) { 498 error_setg(errp, "Zcmt extension requires Zicsr extension"); 499 return; 500 } 501 502 /* 503 * Shorthand vector crypto extensions 504 */ 505 if (cpu->cfg.ext_zvknc) { 506 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true); 507 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true); 508 } 509 510 if (cpu->cfg.ext_zvkng) { 511 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true); 512 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true); 513 } 514 515 if (cpu->cfg.ext_zvkn) { 516 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkned), true); 517 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvknhb), true); 518 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true); 519 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), true); 520 } 521 522 if (cpu->cfg.ext_zvksc) { 523 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true); 524 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true); 525 } 526 527 if (cpu->cfg.ext_zvksg) { 528 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true); 529 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true); 530 } 531 532 if (cpu->cfg.ext_zvks) { 533 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksed), true); 534 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksh), true); 535 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true); 536 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), true); 537 } 538 539 if (cpu->cfg.ext_zvkt) { 540 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbb), true); 541 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true); 542 } 543 544 /* 545 * In principle Zve*x would also suffice here, were they supported 546 * in qemu 547 */ 548 if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg || 549 cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed || 550 cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) { 551 error_setg(errp, 552 "Vector crypto extensions require V or Zve* extensions"); 553 return; 554 } 555 556 if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64f) { 557 error_setg( 558 errp, 559 "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions"); 560 return; 561 } 562 563 if (cpu->cfg.ext_zk) { 564 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkn), true); 565 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkr), true); 566 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkt), true); 567 } 568 569 if (cpu->cfg.ext_zkn) { 570 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true); 571 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true); 572 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true); 573 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkne), true); 574 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknd), true); 575 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknh), true); 576 } 577 578 if (cpu->cfg.ext_zks) { 579 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true); 580 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true); 581 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true); 582 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksed), true); 583 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksh), true); 584 } 585 586 if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) { 587 if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) { 588 error_setg(errp, "zicntr requires zicsr"); 589 return; 590 } 591 cpu->cfg.ext_zicntr = false; 592 } 593 594 if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) { 595 if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) { 596 error_setg(errp, "zihpm requires zicsr"); 597 return; 598 } 599 cpu->cfg.ext_zihpm = false; 600 } 601 602 if (!cpu->cfg.ext_zihpm) { 603 cpu->cfg.pmu_mask = 0; 604 cpu->pmu_avail_ctrs = 0; 605 } 606 607 /* 608 * Disable isa extensions based on priv spec after we 609 * validated and set everything we need. 610 */ 611 riscv_cpu_disable_priv_spec_isa_exts(cpu); 612 } 613 614 void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 615 { 616 CPURISCVState *env = &cpu->env; 617 Error *local_err = NULL; 618 619 riscv_cpu_validate_priv_spec(cpu, &local_err); 620 if (local_err != NULL) { 621 error_propagate(errp, local_err); 622 return; 623 } 624 625 riscv_cpu_validate_misa_priv(env, &local_err); 626 if (local_err != NULL) { 627 error_propagate(errp, local_err); 628 return; 629 } 630 631 if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) { 632 /* 633 * Enhanced PMP should only be available 634 * on harts with PMP support 635 */ 636 error_setg(errp, "Invalid configuration: Smepmp requires PMP support"); 637 return; 638 } 639 640 riscv_cpu_validate_set_extensions(cpu, &local_err); 641 if (local_err != NULL) { 642 error_propagate(errp, local_err); 643 return; 644 } 645 } 646 647 bool riscv_cpu_tcg_compatible(RISCVCPU *cpu) 648 { 649 return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL; 650 } 651 652 static bool riscv_cpu_is_generic(Object *cpu_obj) 653 { 654 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 655 } 656 657 /* 658 * We'll get here via the following path: 659 * 660 * riscv_cpu_realize() 661 * -> cpu_exec_realizefn() 662 * -> tcg_cpu_realize() (via accel_cpu_common_realize()) 663 */ 664 static bool tcg_cpu_realize(CPUState *cs, Error **errp) 665 { 666 RISCVCPU *cpu = RISCV_CPU(cs); 667 Error *local_err = NULL; 668 669 if (!riscv_cpu_tcg_compatible(cpu)) { 670 g_autofree char *name = riscv_cpu_get_name(cpu); 671 error_setg(errp, "'%s' CPU is not compatible with TCG acceleration", 672 name); 673 return false; 674 } 675 676 riscv_cpu_validate_misa_mxl(cpu, &local_err); 677 if (local_err != NULL) { 678 error_propagate(errp, local_err); 679 return false; 680 } 681 682 #ifndef CONFIG_USER_ONLY 683 CPURISCVState *env = &cpu->env; 684 685 CPU(cs)->tcg_cflags |= CF_PCREL; 686 687 if (cpu->cfg.ext_sstc) { 688 riscv_timer_init(cpu); 689 } 690 691 if (cpu->cfg.pmu_mask) { 692 riscv_pmu_init(cpu, &local_err); 693 if (local_err != NULL) { 694 error_propagate(errp, local_err); 695 return false; 696 } 697 698 if (cpu->cfg.ext_sscofpmf) { 699 cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, 700 riscv_pmu_timer_cb, cpu); 701 } 702 } 703 704 /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */ 705 if (riscv_has_ext(env, RVH)) { 706 env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP; 707 } 708 #endif 709 710 return true; 711 } 712 713 typedef struct RISCVCPUMisaExtConfig { 714 target_ulong misa_bit; 715 bool enabled; 716 } RISCVCPUMisaExtConfig; 717 718 static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 719 void *opaque, Error **errp) 720 { 721 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 722 target_ulong misa_bit = misa_ext_cfg->misa_bit; 723 RISCVCPU *cpu = RISCV_CPU(obj); 724 CPURISCVState *env = &cpu->env; 725 bool generic_cpu = riscv_cpu_is_generic(obj); 726 bool prev_val, value; 727 728 if (!visit_type_bool(v, name, &value, errp)) { 729 return; 730 } 731 732 prev_val = env->misa_ext & misa_bit; 733 734 if (value == prev_val) { 735 return; 736 } 737 738 if (value) { 739 if (!generic_cpu) { 740 g_autofree char *cpuname = riscv_cpu_get_name(cpu); 741 error_setg(errp, "'%s' CPU does not allow enabling extensions", 742 cpuname); 743 return; 744 } 745 746 env->misa_ext |= misa_bit; 747 env->misa_ext_mask |= misa_bit; 748 } else { 749 env->misa_ext &= ~misa_bit; 750 env->misa_ext_mask &= ~misa_bit; 751 } 752 } 753 754 static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 755 void *opaque, Error **errp) 756 { 757 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 758 target_ulong misa_bit = misa_ext_cfg->misa_bit; 759 RISCVCPU *cpu = RISCV_CPU(obj); 760 CPURISCVState *env = &cpu->env; 761 bool value; 762 763 value = env->misa_ext & misa_bit; 764 765 visit_type_bool(v, name, &value, errp); 766 } 767 768 #define MISA_CFG(_bit, _enabled) \ 769 {.misa_bit = _bit, .enabled = _enabled} 770 771 static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = { 772 MISA_CFG(RVA, true), 773 MISA_CFG(RVC, true), 774 MISA_CFG(RVD, true), 775 MISA_CFG(RVF, true), 776 MISA_CFG(RVI, true), 777 MISA_CFG(RVE, false), 778 MISA_CFG(RVM, true), 779 MISA_CFG(RVS, true), 780 MISA_CFG(RVU, true), 781 MISA_CFG(RVH, true), 782 MISA_CFG(RVJ, false), 783 MISA_CFG(RVV, false), 784 MISA_CFG(RVG, false), 785 }; 786 787 /* 788 * We do not support user choice tracking for MISA 789 * extensions yet because, so far, we do not silently 790 * change MISA bits during realize() (RVG enables MISA 791 * bits but the user is warned about it). 792 */ 793 static void riscv_cpu_add_misa_properties(Object *cpu_obj) 794 { 795 bool use_def_vals = riscv_cpu_is_generic(cpu_obj); 796 int i; 797 798 for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) { 799 const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i]; 800 int bit = misa_cfg->misa_bit; 801 const char *name = riscv_get_misa_ext_name(bit); 802 const char *desc = riscv_get_misa_ext_description(bit); 803 804 /* Check if KVM already created the property */ 805 if (object_property_find(cpu_obj, name)) { 806 continue; 807 } 808 809 object_property_add(cpu_obj, name, "bool", 810 cpu_get_misa_ext_cfg, 811 cpu_set_misa_ext_cfg, 812 NULL, (void *)misa_cfg); 813 object_property_set_description(cpu_obj, name, desc); 814 if (use_def_vals) { 815 object_property_set_bool(cpu_obj, name, misa_cfg->enabled, NULL); 816 } 817 } 818 } 819 820 static bool cpu_ext_is_deprecated(const char *ext_name) 821 { 822 return isupper(ext_name[0]); 823 } 824 825 /* 826 * String will be allocated in the heap. Caller is responsible 827 * for freeing it. 828 */ 829 static char *cpu_ext_to_lower(const char *ext_name) 830 { 831 char *ret = g_malloc0(strlen(ext_name) + 1); 832 833 strcpy(ret, ext_name); 834 ret[0] = tolower(ret[0]); 835 836 return ret; 837 } 838 839 static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name, 840 void *opaque, Error **errp) 841 { 842 const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque; 843 RISCVCPU *cpu = RISCV_CPU(obj); 844 bool generic_cpu = riscv_cpu_is_generic(obj); 845 bool prev_val, value; 846 847 if (!visit_type_bool(v, name, &value, errp)) { 848 return; 849 } 850 851 if (cpu_ext_is_deprecated(multi_ext_cfg->name)) { 852 g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name); 853 854 warn_report("CPU property '%s' is deprecated. Please use '%s' instead", 855 multi_ext_cfg->name, lower); 856 } 857 858 g_hash_table_insert(multi_ext_user_opts, 859 GUINT_TO_POINTER(multi_ext_cfg->offset), 860 (gpointer)value); 861 862 prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset); 863 864 if (value == prev_val) { 865 return; 866 } 867 868 if (value && !generic_cpu) { 869 g_autofree char *cpuname = riscv_cpu_get_name(cpu); 870 error_setg(errp, "'%s' CPU does not allow enabling extensions", 871 cpuname); 872 return; 873 } 874 875 isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value); 876 } 877 878 static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name, 879 void *opaque, Error **errp) 880 { 881 const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque; 882 bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset); 883 884 visit_type_bool(v, name, &value, errp); 885 } 886 887 static void cpu_add_multi_ext_prop(Object *cpu_obj, 888 const RISCVCPUMultiExtConfig *multi_cfg) 889 { 890 bool generic_cpu = riscv_cpu_is_generic(cpu_obj); 891 bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name); 892 893 object_property_add(cpu_obj, multi_cfg->name, "bool", 894 cpu_get_multi_ext_cfg, 895 cpu_set_multi_ext_cfg, 896 NULL, (void *)multi_cfg); 897 898 if (!generic_cpu || deprecated_ext) { 899 return; 900 } 901 902 /* 903 * Set def val directly instead of using 904 * object_property_set_bool() to save the set() 905 * callback hash for user inputs. 906 */ 907 isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset, 908 multi_cfg->enabled); 909 } 910 911 static void riscv_cpu_add_multiext_prop_array(Object *obj, 912 const RISCVCPUMultiExtConfig *array) 913 { 914 const RISCVCPUMultiExtConfig *prop; 915 916 g_assert(array); 917 918 for (prop = array; prop && prop->name; prop++) { 919 cpu_add_multi_ext_prop(obj, prop); 920 } 921 } 922 923 /* 924 * Add CPU properties with user-facing flags. 925 * 926 * This will overwrite existing env->misa_ext values with the 927 * defaults set via riscv_cpu_add_misa_properties(). 928 */ 929 static void riscv_cpu_add_user_properties(Object *obj) 930 { 931 #ifndef CONFIG_USER_ONLY 932 riscv_add_satp_mode_properties(obj); 933 #endif 934 935 riscv_cpu_add_misa_properties(obj); 936 937 riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions); 938 riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts); 939 riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts); 940 941 riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts); 942 943 for (Property *prop = riscv_cpu_options; prop && prop->name; prop++) { 944 qdev_property_add_static(DEVICE(obj), prop); 945 } 946 } 947 948 /* 949 * The 'max' type CPU will have all possible ratified 950 * non-vendor extensions enabled. 951 */ 952 static void riscv_init_max_cpu_extensions(Object *obj) 953 { 954 RISCVCPU *cpu = RISCV_CPU(obj); 955 CPURISCVState *env = &cpu->env; 956 const RISCVCPUMultiExtConfig *prop; 957 958 /* Enable RVG, RVJ and RVV that are disabled by default */ 959 riscv_cpu_set_misa(env, env->misa_mxl, env->misa_ext | RVG | RVJ | RVV); 960 961 for (prop = riscv_cpu_extensions; prop && prop->name; prop++) { 962 isa_ext_update_enabled(cpu, prop->offset, true); 963 } 964 965 /* set vector version */ 966 env->vext_ver = VEXT_VERSION_1_00_0; 967 968 /* Zfinx is not compatible with F. Disable it */ 969 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false); 970 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false); 971 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false); 972 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false); 973 974 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false); 975 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false); 976 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false); 977 978 if (env->misa_mxl != MXL_RV32) { 979 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false); 980 } 981 } 982 983 static bool riscv_cpu_has_max_extensions(Object *cpu_obj) 984 { 985 return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL; 986 } 987 988 static void tcg_cpu_instance_init(CPUState *cs) 989 { 990 RISCVCPU *cpu = RISCV_CPU(cs); 991 Object *obj = OBJECT(cpu); 992 993 multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal); 994 riscv_cpu_add_user_properties(obj); 995 996 if (riscv_cpu_has_max_extensions(obj)) { 997 riscv_init_max_cpu_extensions(obj); 998 } 999 } 1000 1001 static void tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc) 1002 { 1003 /* 1004 * All cpus use the same set of operations. 1005 */ 1006 cc->tcg_ops = &riscv_tcg_ops; 1007 } 1008 1009 static void tcg_cpu_class_init(CPUClass *cc) 1010 { 1011 cc->init_accel_cpu = tcg_cpu_init_ops; 1012 } 1013 1014 static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data) 1015 { 1016 AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); 1017 1018 acc->cpu_class_init = tcg_cpu_class_init; 1019 acc->cpu_instance_init = tcg_cpu_instance_init; 1020 acc->cpu_target_realize = tcg_cpu_realize; 1021 } 1022 1023 static const TypeInfo tcg_cpu_accel_type_info = { 1024 .name = ACCEL_CPU_NAME("tcg"), 1025 1026 .parent = TYPE_ACCEL_CPU, 1027 .class_init = tcg_cpu_accel_class_init, 1028 .abstract = true, 1029 }; 1030 1031 static void tcg_cpu_accel_register_types(void) 1032 { 1033 type_register_static(&tcg_cpu_accel_type_info); 1034 } 1035 type_init(tcg_cpu_accel_register_types); 1036