/*
 * riscv TCG cpu class initialization
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "tcg-cpu.h"
#include "cpu.h"
#include "pmu.h"
#include "time_helper.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "hw/core/accel-cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tcg/tcg.h"

/* Hash that stores user set extensions */
static GHashTable *multi_ext_user_opts;

static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
    return g_hash_table_contains(multi_ext_user_opts,
                                 GUINT_TO_POINTER(ext_offset));
}

static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
}

static const struct TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

static int cpu_cfg_ext_get_min_version(uint32_t ext_offset)
{
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (edata->ext_enable_offset != ext_offset) {
            continue;
        }

        return edata->min_version;
    }

    g_assert_not_reached();
}

static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
                                    bool value)
{
    CPURISCVState *env = &cpu->env;
    bool prev_val = isa_ext_is_enabled(cpu, ext_offset);
    int min_version;

    if (prev_val == value) {
        return;
    }

    if (cpu_cfg_ext_is_user_set(ext_offset)) {
        return;
    }

    if (value && env->priv_ver != PRIV_VERSION_LATEST) {
        /* Do not enable it if priv_ver is older than min_version */
        min_version = cpu_cfg_ext_get_min_version(ext_offset);
        if (env->priv_ver < min_version) {
            return;
        }
    }

    isa_ext_update_enabled(cpu, ext_offset, value);
}

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPUClass *cc = CPU_CLASS(mcc);
    CPURISCVState *env = &cpu->env;

    /* Validate that MISA_MXL is set properly. */
    switch (env->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }

    if (env->misa_mxl_max != env->misa_mxl) {
        error_setg(errp, "misa_mxl_max must be equal to misa_mxl");
        return;
    }
}

static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    int priv_version = -1;

    if (cpu->cfg.priv_spec) {
        if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
            priv_version = PRIV_VERSION_1_12_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
            priv_version = PRIV_VERSION_1_11_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
            priv_version = PRIV_VERSION_1_10_0;
        } else {
            error_setg(errp,
                       "Unsupported privilege spec version '%s'",
                       cpu->cfg.priv_spec);
            return;
        }

        env->priv_ver = priv_version;
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    if (!is_power_of_2(cfg->vlen)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }

    if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }

    if (!is_power_of_2(cfg->elen)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }

    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }

    if (cfg->vext_spec) {
        if (!g_strcmp0(cfg->vext_spec, "v1.0")) {
            env->vext_ver = VEXT_VERSION_1_00_0;
        } else {
            error_setg(errp, "Unsupported vector spec version '%s'",
                       cfg->vext_spec);
            return;
        }
    } else if (env->vext_ver == 0) {
        qemu_log("vector version is not specified, "
                 "use the default value v1.0\n");

        env->vext_ver = VEXT_VERSION_1_00_0;
    }
}

static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    const RISCVIsaExtData *edata;

    /* Force disable extensions if priv spec version does not match */
    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) &&
            (env->priv_ver < edata->min_version)) {
            /*
             * These two extensions are always enabled as they were supported
             * by QEMU before they were added as extensions in the ISA.
             */
            if (!strcmp(edata->name, "zicntr") ||
                !strcmp(edata->name, "zihpm")) {
                continue;
            }

            isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        edata->name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        edata->name);
#endif
        }
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    /* Do some ISA extension error checking */
    if (riscv_has_ext(env, RVG) &&
        !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) &&
          riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) &&
          riscv_has_ext(env, RVD) &&
          cpu->cfg.ext_zicsr && cpu->cfg.ext_zifencei)) {

        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr)) &&
            !cpu->cfg.ext_zicsr) {
            error_setg(errp, "RVG requires Zicsr but user set Zicsr to false");
            return;
        }

        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei)) &&
            !cpu->cfg.ext_zifencei) {
            error_setg(errp, "RVG requires Zifencei but user set "
                       "Zifencei to false");
            return;
        }

        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zicsr), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zifencei), true);

        env->misa_ext |= RVI | RVM | RVA | RVF | RVD;
        env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD;
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfh) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zfhmin), true);
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        /* The V vector extension depends on the Zve64d extension */
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64d), true);
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64f), true);
    }

    /* The Zve64f extension depends on the Zve32f extension */
    if (cpu->cfg.ext_zve64f) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve32f), true);
    }

    if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) {
        error_setg(errp, "Zve64d/V extensions require D extension");
        return;
    }

    if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zve32f/Zve64f extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zvfh) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvfhmin), true);
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extension requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) {
        error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    /* Set the ISA extensions, checks should have happened above */
    if (cpu->cfg.ext_zhinx) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zhinxmin), true);
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zce) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
    }

    /* zca, zcd and zcf have a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
        if (riscv_has_ext(env, RVD)) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
        }
    }

    if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    /*
     * Shorthand vector crypto extensions
     */
    if (cpu->cfg.ext_zvknc) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
    }

    if (cpu->cfg.ext_zvkng) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true);
    }

    if (cpu->cfg.ext_zvkn) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkned), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvknhb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), true);
    }

    if (cpu->cfg.ext_zvksc) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
    }

    if (cpu->cfg.ext_zvksg) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true);
    }

    if (cpu->cfg.ext_zvks) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksed), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksh), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), true);
    }

    if (cpu->cfg.ext_zvkt) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
    }

    /*
     * In principle Zve*x would also suffice here, were they supported
     * in qemu
     */
    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
         cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
        error_setg(errp,
                   "Vector crypto extensions require V or Zve* extensions");
        return;
    }

    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64f) {
        error_setg(
            errp,
            "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions");
        return;
    }

    if (cpu->cfg.ext_zk) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkn), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkr), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkt), true);
    }

    if (cpu->cfg.ext_zkn) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkne), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknd), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknh), true);
    }

    if (cpu->cfg.ext_zks) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksed), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksh), true);
    }

    if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) {
            error_setg(errp, "zicntr requires zicsr");
            return;
        }
        cpu->cfg.ext_zicntr = false;
    }

    if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) {
            error_setg(errp, "zihpm requires zicsr");
            return;
        }
        cpu->cfg.ext_zihpm = false;
    }

    if (!cpu->cfg.ext_zihpm) {
        cpu->cfg.pmu_mask = 0;
        cpu->pmu_avail_ctrs = 0;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    riscv_cpu_validate_priv_spec(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: Smepmp requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

bool riscv_cpu_tcg_compatible(RISCVCPU *cpu)
{
    return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL;
}

static bool riscv_cpu_is_generic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> tcg_cpu_realize() (via accel_cpu_common_realize())
 */
static bool tcg_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    Error *local_err = NULL;

    if (!riscv_cpu_tcg_compatible(cpu)) {
        g_autofree char *name = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU is not compatible with TCG acceleration",
                   name);
        return false;
    }

    riscv_cpu_validate_misa_mxl(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return false;
    }

#ifndef CONFIG_USER_ONLY
    CPURISCVState *env = &cpu->env;

    CPU(cs)->tcg_cflags |= CF_PCREL;

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    if (cpu->cfg.pmu_mask) {
        riscv_pmu_init(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return false;
        }

        if (cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }

    /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
    }
#endif

    return true;
}

typedef struct RISCVCPUMisaExtConfig {
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;

static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool generic_cpu = riscv_cpu_is_generic(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    prev_val = env->misa_ext & misa_bit;

    if (value == prev_val) {
        return;
    }

    if (value) {
        if (!generic_cpu) {
            g_autofree char *cpuname = riscv_cpu_get_name(cpu);
            error_setg(errp, "'%s' CPU does not allow enabling extensions",
                       cpuname);
            return;
        }

        env->misa_ext |= misa_bit;
        env->misa_ext_mask |= misa_bit;
    } else {
        env->misa_ext &= ~misa_bit;
        env->misa_ext_mask &= ~misa_bit;
    }
}

static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVJ, false),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
};

/*
 * We do not support user choice tracking for MISA
 * extensions yet because, so far, we do not silently
 * change MISA bits during realize() (RVG enables MISA
 * bits but the user is warned about it).
 */
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    bool use_def_vals = riscv_cpu_is_generic(cpu_obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
        int bit = misa_cfg->misa_bit;
        const char *name = riscv_get_misa_ext_name(bit);
        const char *desc = riscv_get_misa_ext_description(bit);

        /* Check if KVM already created the property */
        if (object_property_find(cpu_obj, name)) {
            continue;
        }

        object_property_add(cpu_obj, name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, name, desc);
        if (use_def_vals) {
            object_property_set_bool(cpu_obj, name, misa_cfg->enabled, NULL);
        }
    }
}

static bool cpu_ext_is_deprecated(const char *ext_name)
{
    return isupper(ext_name[0]);
}

/*
 * String will be allocated in the heap. Caller is responsible
 * for freeing it.
 */
static char *cpu_ext_to_lower(const char *ext_name)
{
    char *ret = g_malloc0(strlen(ext_name) + 1);

    strcpy(ret, ext_name);
    ret[0] = tolower(ret[0]);

    return ret;
}

static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool generic_cpu = riscv_cpu_is_generic(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (cpu_ext_is_deprecated(multi_ext_cfg->name)) {
        g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name);

        warn_report("CPU property '%s' is deprecated. Please use '%s' instead",
                    multi_ext_cfg->name, lower);
    }

    g_hash_table_insert(multi_ext_user_opts,
                        GUINT_TO_POINTER(multi_ext_cfg->offset),
                        (gpointer)value);

    prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset);

    if (value == prev_val) {
        return;
    }

    if (value && !generic_cpu) {
        g_autofree char *cpuname = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU does not allow enabling extensions",
                   cpuname);
        return;
    }

    isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value);
}

static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_add_multi_ext_prop(Object *cpu_obj,
                                   const RISCVCPUMultiExtConfig *multi_cfg)
{
    bool generic_cpu = riscv_cpu_is_generic(cpu_obj);
    bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name);

    object_property_add(cpu_obj, multi_cfg->name, "bool",
                        cpu_get_multi_ext_cfg,
                        cpu_set_multi_ext_cfg,
                        NULL, (void *)multi_cfg);

    if (!generic_cpu || deprecated_ext) {
        return;
    }

    /*
     * Set def val directly instead of using
     * object_property_set_bool() to save the set()
     * callback hash for user inputs.
     */
    isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset,
                           multi_cfg->enabled);
}

static void riscv_cpu_add_multiext_prop_array(Object *obj,
                                        const RISCVCPUMultiExtConfig *array)
{
    const RISCVCPUMultiExtConfig *prop;

    g_assert(array);

    for (prop = array; prop && prop->name; prop++) {
        cpu_add_multi_ext_prop(obj, prop);
    }
}

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);
#endif

    riscv_cpu_add_misa_properties(obj);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts);

    for (Property *prop = riscv_cpu_options; prop && prop->name; prop++) {
        qdev_property_add_static(DEVICE(obj), prop);
    }
}

/*
 * The 'max' type CPU will have all possible ratified
 * non-vendor extensions enabled.
 */
static void riscv_init_max_cpu_extensions(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    const RISCVCPUMultiExtConfig *prop;

    /* Enable RVG, RVJ and RVV that are disabled by default */
    riscv_cpu_set_misa(env, env->misa_mxl, env->misa_ext | RVG | RVJ | RVV);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
        isa_ext_update_enabled(cpu, prop->offset, true);
    }

    /* set vector version */
    env->vext_ver = VEXT_VERSION_1_00_0;

    /* Zfinx is not compatible with F. Disable it */
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false);

    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false);

    if (env->misa_mxl != MXL_RV32) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
    }
}

static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL;
}

static void tcg_cpu_instance_init(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    Object *obj = OBJECT(cpu);

    multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
    riscv_cpu_add_user_properties(obj);

    if (riscv_cpu_has_max_extensions(obj)) {
        riscv_init_max_cpu_extensions(obj);
    }
}

static void tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
{
    /*
     * All cpus use the same set of operations.
     */
    cc->tcg_ops = &riscv_tcg_ops;
}

static void tcg_cpu_class_init(CPUClass *cc)
{
    cc->init_accel_cpu = tcg_cpu_init_ops;
}

static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_class_init = tcg_cpu_class_init;
    acc->cpu_instance_init = tcg_cpu_instance_init;
    acc->cpu_target_realize = tcg_cpu_realize;
}

static const TypeInfo tcg_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("tcg"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = tcg_cpu_accel_class_init,
    .abstract = true,
};

static void tcg_cpu_accel_register_types(void)
{
    type_register_static(&tcg_cpu_accel_type_info);
}
type_init(tcg_cpu_accel_register_types);