/*
 * riscv TCG cpu class initialization
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "tcg-cpu.h"
#include "cpu.h"
#include "pmu.h"
#include "time_helper.h"
#include "qapi/error.h"
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "hw/core/accel-cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tcg/tcg.h"

static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
}

static const struct TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

/*
 * Enable or disable an extension implied by another one, unless the user
 * set it explicitly or the configured priv spec is too old for it.
 */
static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
                                    bool value)
{
    CPURISCVState *env = &cpu->env;
    bool prev_val = isa_ext_is_enabled(cpu, ext_offset);
    int min_version;

    if (prev_val == value) {
        return;
    }

    if (cpu_cfg_ext_is_user_set(ext_offset)) {
        return;
    }

    if (value && env->priv_ver != PRIV_VERSION_LATEST) {
        /* Do not enable it if priv_ver is older than min_version */
        min_version = cpu_cfg_ext_get_min_version(ext_offset);
        if (env->priv_ver < min_version) {
            return;
        }
    }

    isa_ext_update_enabled(cpu, ext_offset, value);
}

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPUClass *cc = CPU_CLASS(mcc);
    CPURISCVState *env = &cpu->env;

    /* Validate that MISA_MXL is set properly. */
    switch (env->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }

    if (env->misa_mxl_max != env->misa_mxl) {
        error_setg(errp, "misa_mxl_max must be equal to misa_mxl");
        return;
    }
}

static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    int priv_version = -1;

    if (cpu->cfg.priv_spec) {
        if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
            priv_version = PRIV_VERSION_1_12_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
            priv_version = PRIV_VERSION_1_11_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
            priv_version = PRIV_VERSION_1_10_0;
        } else {
            error_setg(errp,
                       "Unsupported privilege spec version '%s'",
                       cpu->cfg.priv_spec);
            return;
        }

        env->priv_ver = priv_version;
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    if (!is_power_of_2(cfg->vlen)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }

    if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }

    if (!is_power_of_2(cfg->elen)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }

    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }

    if (cfg->vext_spec) {
        if (!g_strcmp0(cfg->vext_spec, "v1.0")) {
            env->vext_ver = VEXT_VERSION_1_00_0;
        } else {
            error_setg(errp, "Unsupported vector spec version '%s'",
                       cfg->vext_spec);
            return;
        }
    } else if (env->vext_ver == 0) {
        qemu_log("vector version is not specified, "
                 "use the default value v1.0\n");

        env->vext_ver = VEXT_VERSION_1_00_0;
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
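 *
 * Implied extensions are enabled through cpu_cfg_ext_auto_update(), which
 * leaves anything the user set explicitly on the command line untouched.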
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    /* Do some ISA extension error checking */
    if (riscv_has_ext(env, RVG) &&
        !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) &&
          riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) &&
          riscv_has_ext(env, RVD) &&
          cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) {

        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_icsr)) &&
            !cpu->cfg.ext_icsr) {
            error_setg(errp, "RVG requires Zicsr but user set Zicsr to false");
            return;
        }

        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_ifencei)) &&
            !cpu->cfg.ext_ifencei) {
            error_setg(errp, "RVG requires Zifencei but user set "
                       "Zifencei to false");
            return;
        }

        warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_icsr), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_ifencei), true);

        env->misa_ext |= RVI | RVM | RVA | RVF | RVD;
        env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD;
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_icsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfh) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zfhmin), true);
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        /* The V vector extension depends on the Zve64d extension */
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64d), true);
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64f), true);
    }

    /* The Zve64f extension depends on the Zve32f extension */
    if (cpu->cfg.ext_zve64f) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve32f), true);
    }

    if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) {
        error_setg(errp, "Zve64d/V extensions require D extension");
        return;
    }

    if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zve32f/Zve64f extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zvfh) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvfhmin), true);
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extension requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) {
        error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    /* Set the ISA extensions, checks should have happened above */
    if (cpu->cfg.ext_zhinx) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zhinxmin), true);
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_icsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zce) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
    }

    /* zca, zcd and zcf have a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
        if (riscv_has_ext(env, RVD)) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
        }
    }

    if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    /*
     * In principle Zve*x would also suffice here, were they supported
     * in qemu
     */
    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkg || cpu->cfg.ext_zvkned ||
         cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed || cpu->cfg.ext_zvksh) &&
        !cpu->cfg.ext_zve32f) {
        error_setg(errp,
                   "Vector crypto extensions require V or Zve* extensions");
        return;
    }

    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64f) {
        error_setg(
            errp,
            "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions");
        return;
    }

    if (cpu->cfg.ext_zk) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkn), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkr), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkt), true);
    }

    if (cpu->cfg.ext_zkn) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkne), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknd), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknh), true);
    }

    if (cpu->cfg.ext_zks) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksed), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksh), true);
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
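     *
     * riscv_cpu_disable_priv_spec_isa_exts() turns off any enabled extension
     * whose minimum required priv spec version is newer than env->priv_ver.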
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> tcg_cpu_realize() (via accel_cpu_common_realize())
 */
static bool tcg_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST)) {
        error_setg(errp, "'host' CPU is not compatible with TCG acceleration");
        return false;
    }

    riscv_cpu_validate_misa_mxl(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return false;
    }

    riscv_cpu_validate_priv_spec(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return false;
    }

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return false;
    }

    if (cpu->cfg.epmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: EPMP requires PMP support");
        return false;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return false;
    }

#ifndef CONFIG_USER_ONLY
    CPU(cs)->tcg_cflags |= CF_PCREL;

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    if (cpu->cfg.pmu_num) {
        if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }
#endif

    return true;
}

static void tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
{
    /*
     * All cpus use the same set of operations.
     */
    cc->tcg_ops = &riscv_tcg_ops;
}

static void tcg_cpu_class_init(CPUClass *cc)
{
    cc->init_accel_cpu = tcg_cpu_init_ops;
}

static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_class_init = tcg_cpu_class_init;
    acc->cpu_target_realize = tcg_cpu_realize;
}

static const TypeInfo tcg_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("tcg"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = tcg_cpu_accel_class_init,
    .abstract = true,
};

static void tcg_cpu_accel_register_types(void)
{
    type_register_static(&tcg_cpu_accel_type_info);
}
type_init(tcg_cpu_accel_register_types);