// SPDX-License-Identifier: GPL-2.0
/*
 * Check for KVM_GET_REG_LIST regressions.
 *
 * Copyright (c) 2023 Intel Corporation
 *
 */
#include <stdio.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"

/* Strips the architecture and size fields, leaving only the reg id payload. */
#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK)

/*
 * Per-extension flag, set by finalize_vcpu() when writing 0 to the
 * extension's ISA_EXT register fails even though the host reported the
 * extension as enabled (i.e. the extension cannot be disabled).
 */
static bool isa_ext_cant_disable[KVM_RISCV_ISA_EXT_MAX];

/* Return true if @reg should be excluded from the blessed-list comparison. */
bool filter_reg(__u64 reg)
{
	switch (reg & ~REG_MASK) {
	/*
	 * Same set of ISA_EXT registers are not present on all host because
	 * ISA_EXT registers are visible to the KVM user space based on the
	 * ISA extensions available on the host. Also, disabling an ISA
	 * extension using corresponding ISA_EXT register does not affect
	 * the visibility of the ISA_EXT register itself.
	 *
	 * Based on above, we should filter-out all ISA_EXT registers.
	 */
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_V:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVNAPOT:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICSR:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI:
	case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM:
		return true;
	/* AIA registers are always available when Ssaia can't be disabled */
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect):
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1):
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2):
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh):
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph):
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
	case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
		return isa_ext_cant_disable[KVM_RISCV_ISA_EXT_SSAIA];
	default:
		break;
	}

	return false;
}

/* On RISC-V a rejected register set is expected to fail with EINVAL. */
bool check_reject_set(int err)
{
	return err == EINVAL;
}

/*
 * Return true if @vcpu reports ISA extension @ext as enabled via its
 * ISA_EXT register; a failed read is treated as "not available".
 */
static inline bool vcpu_has_ext(struct kvm_vcpu *vcpu, int ext)
{
	int ret;
	unsigned long value;

	ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext), &value);
	return (ret) ? false : !!value;
}

/*
 * Prepare @vcpu for config @c: disable every ISA extension that can be
 * disabled (recording the ones that can't), then enable each sublist's
 * required feature and skip the test if it cannot be enabled.
 */
void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
{
	unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 };
	struct vcpu_reg_sublist *s;
	int rc;

	/* Snapshot which extensions the host enabled by default. */
	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++)
		__vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(i), &isa_ext_state[i]);

	/*
	 * Disable all extensions which were enabled by default
	 * if they were available in the risc-v host.
	 */
	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		rc = __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0);
		/* Disable failed on an enabled extension: it can't be turned off. */
		if (rc && isa_ext_state[i])
			isa_ext_cant_disable[i] = true;
	}

	for_each_sublist(c, s) {
		if (!s->feature)
			continue;

		/* Try to enable the desired extension */
		__vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(s->feature), 1);

		/* Double check whether the desired extension was enabled */
		__TEST_REQUIRE(vcpu_has_ext(vcpu, s->feature),
			       "%s not available, skipping tests\n", s->name);
	}
}

/* Map a CONFIG pseudo-reg id to its symbolic name for printing. */
static const char *config_id_to_str(__u64 id)
{
	/* reg_off is the offset into struct kvm_riscv_config */
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CONFIG);

	switch (reg_off) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		return "KVM_REG_RISCV_CONFIG_REG(isa)";
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		return "KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)";
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		return "KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)";
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		return "KVM_REG_RISCV_CONFIG_REG(mvendorid)";
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		return "KVM_REG_RISCV_CONFIG_REG(marchid)";
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		return "KVM_REG_RISCV_CONFIG_REG(mimpid)";
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		return "KVM_REG_RISCV_CONFIG_REG(satp_mode)";
	}

	/*
	 * Config regs would grow regularly with new pseudo reg added, so
	 * just show raw id to indicate a new pseudo config reg.
	 */
	return strdup_printf("KVM_REG_RISCV_CONFIG_REG(%lld) /* UNKNOWN */", reg_off);
}

/* Map a CORE reg id to its symbolic name; fails the test on an unknown id. */
static const char *core_id_to_str(const char *prefix, __u64 id)
{
	/* reg_off is the offset into struct kvm_riscv_core */
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CORE);

	switch (reg_off) {
	case KVM_REG_RISCV_CORE_REG(regs.pc):
		return "KVM_REG_RISCV_CORE_REG(regs.pc)";
	case KVM_REG_RISCV_CORE_REG(regs.ra):
		return "KVM_REG_RISCV_CORE_REG(regs.ra)";
	case KVM_REG_RISCV_CORE_REG(regs.sp):
		return "KVM_REG_RISCV_CORE_REG(regs.sp)";
	case KVM_REG_RISCV_CORE_REG(regs.gp):
		return "KVM_REG_RISCV_CORE_REG(regs.gp)";
	case KVM_REG_RISCV_CORE_REG(regs.tp):
		return "KVM_REG_RISCV_CORE_REG(regs.tp)";
	/* GPR groups are contiguous in struct kvm_riscv_core, so case ranges work. */
	case KVM_REG_RISCV_CORE_REG(regs.t0) ... KVM_REG_RISCV_CORE_REG(regs.t2):
		return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
				     reg_off - KVM_REG_RISCV_CORE_REG(regs.t0));
	case KVM_REG_RISCV_CORE_REG(regs.s0) ... KVM_REG_RISCV_CORE_REG(regs.s1):
		return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
				     reg_off - KVM_REG_RISCV_CORE_REG(regs.s0));
	case KVM_REG_RISCV_CORE_REG(regs.a0) ... KVM_REG_RISCV_CORE_REG(regs.a7):
		return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.a%lld)",
				     reg_off - KVM_REG_RISCV_CORE_REG(regs.a0));
	case KVM_REG_RISCV_CORE_REG(regs.s2) ... KVM_REG_RISCV_CORE_REG(regs.s11):
		/* + 2 because this range resumes the s-register names at s2. */
		return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
				     reg_off - KVM_REG_RISCV_CORE_REG(regs.s2) + 2);
	case KVM_REG_RISCV_CORE_REG(regs.t3) ... KVM_REG_RISCV_CORE_REG(regs.t6):
		/* + 3 because this range resumes the t-register names at t3. */
		return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
				     reg_off - KVM_REG_RISCV_CORE_REG(regs.t3) + 3);
	case KVM_REG_RISCV_CORE_REG(mode):
		return "KVM_REG_RISCV_CORE_REG(mode)";
	}

	TEST_FAIL("%s: Unknown core reg id: 0x%llx", prefix, id);
	return NULL;
}

/* Build the printable name for a general/AIA CSR from the CSR field name. */
#define RISCV_CSR_GENERAL(csr) \
	"KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(" #csr ")"
#define RISCV_CSR_AIA(csr) \
	"KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_REG(" #csr ")"

/* Map a general CSR offset to its symbolic name; fails the test if unknown. */
static const char *general_csr_id_to_str(__u64 reg_off)
{
	/* reg_off is the offset into struct kvm_riscv_csr */
	switch (reg_off) {
	case KVM_REG_RISCV_CSR_REG(sstatus):
		return RISCV_CSR_GENERAL(sstatus);
	case KVM_REG_RISCV_CSR_REG(sie):
		return RISCV_CSR_GENERAL(sie);
	case KVM_REG_RISCV_CSR_REG(stvec):
		return RISCV_CSR_GENERAL(stvec);
	case KVM_REG_RISCV_CSR_REG(sscratch):
		return RISCV_CSR_GENERAL(sscratch);
	case KVM_REG_RISCV_CSR_REG(sepc):
		return RISCV_CSR_GENERAL(sepc);
	case KVM_REG_RISCV_CSR_REG(scause):
		return RISCV_CSR_GENERAL(scause);
	case KVM_REG_RISCV_CSR_REG(stval):
		return RISCV_CSR_GENERAL(stval);
	case KVM_REG_RISCV_CSR_REG(sip):
		return RISCV_CSR_GENERAL(sip);
	case KVM_REG_RISCV_CSR_REG(satp):
		return RISCV_CSR_GENERAL(satp);
	case KVM_REG_RISCV_CSR_REG(scounteren):
		return RISCV_CSR_GENERAL(scounteren);
	}

	TEST_FAIL("Unknown general csr reg: 0x%llx", reg_off);
	return NULL;
}

/* Map an AIA CSR offset to its symbolic name; fails the test if unknown. */
static const char *aia_csr_id_to_str(__u64 reg_off)
{
	/* reg_off is the offset into struct kvm_riscv_aia_csr */
	switch (reg_off) {
	case KVM_REG_RISCV_CSR_AIA_REG(siselect):
		return RISCV_CSR_AIA(siselect);
	case KVM_REG_RISCV_CSR_AIA_REG(iprio1):
		return RISCV_CSR_AIA(iprio1);
	case KVM_REG_RISCV_CSR_AIA_REG(iprio2):
		return RISCV_CSR_AIA(iprio2);
	case KVM_REG_RISCV_CSR_AIA_REG(sieh):
		return RISCV_CSR_AIA(sieh);
	case KVM_REG_RISCV_CSR_AIA_REG(siph):
		return RISCV_CSR_AIA(siph);
	case KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
		return RISCV_CSR_AIA(iprio1h);
	case KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
		return RISCV_CSR_AIA(iprio2h);
	}

	TEST_FAIL("Unknown aia csr reg: 0x%llx", reg_off);
	return NULL;
}

/* Dispatch a CSR reg id to the general or AIA name decoder by subtype. */
static const char *csr_id_to_str(const char *prefix, __u64 id)
{
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CSR);
	__u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;

	reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		return general_csr_id_to_str(reg_off);
	case KVM_REG_RISCV_CSR_AIA:
		return aia_csr_id_to_str(reg_off);
	}

	TEST_FAIL("%s: Unknown csr subtype: 0x%llx", prefix, reg_subtype);
	return NULL;
}

/* Map a TIMER reg id to its symbolic name; fails the test if unknown. */
static const char *timer_id_to_str(const char *prefix, __u64 id)
{
	/* reg_off is the offset into struct kvm_riscv_timer */
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_TIMER);

	switch (reg_off) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		return "KVM_REG_RISCV_TIMER_REG(frequency)";
	case KVM_REG_RISCV_TIMER_REG(time):
		return "KVM_REG_RISCV_TIMER_REG(time)";
	case KVM_REG_RISCV_TIMER_REG(compare):
		return "KVM_REG_RISCV_TIMER_REG(compare)";
	case KVM_REG_RISCV_TIMER_REG(state):
		return "KVM_REG_RISCV_TIMER_REG(state)";
	}

	TEST_FAIL("%s: Unknown timer reg id: 0x%llx", prefix, id);
	return NULL;
}

/* Map a single-precision FP reg id to its symbolic name. */
static const char *fp_f_id_to_str(const char *prefix, __u64 id)
{
	/* reg_off is the offset into struct __riscv_f_ext_state */
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_F);

	switch (reg_off) {
	case KVM_REG_RISCV_FP_F_REG(f[0]) ...
	     KVM_REG_RISCV_FP_F_REG(f[31]):
		return strdup_printf("KVM_REG_RISCV_FP_F_REG(f[%lld])", reg_off);
	case KVM_REG_RISCV_FP_F_REG(fcsr):
		return "KVM_REG_RISCV_FP_F_REG(fcsr)";
	}

	TEST_FAIL("%s: Unknown fp_f reg id: 0x%llx", prefix, id);
	return NULL;
}

/* Map a double-precision FP reg id to its symbolic name. */
static const char *fp_d_id_to_str(const char *prefix, __u64 id)
{
	/* reg_off is the offset into struct __riscv_d_ext_state */
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_D);

	switch (reg_off) {
	case KVM_REG_RISCV_FP_D_REG(f[0]) ...
	     KVM_REG_RISCV_FP_D_REG(f[31]):
		return strdup_printf("KVM_REG_RISCV_FP_D_REG(f[%lld])", reg_off);
	case KVM_REG_RISCV_FP_D_REG(fcsr):
		return "KVM_REG_RISCV_FP_D_REG(fcsr)";
	}

	TEST_FAIL("%s: Unknown fp_d reg id: 0x%llx", prefix, id);
	return NULL;
}

/* Map an ISA_EXT reg id to its symbolic name via a lookup table. */
static const char *isa_ext_id_to_str(__u64 id)
{
	/* reg_off is the offset into unsigned long kvm_isa_ext_arr[] */
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_ISA_EXT);

	/* NOTE: index order must match the KVM_RISCV_ISA_EXT_* enum values. */
	static const char * const kvm_isa_ext_reg_name[] = {
		"KVM_RISCV_ISA_EXT_A",
		"KVM_RISCV_ISA_EXT_C",
		"KVM_RISCV_ISA_EXT_D",
		"KVM_RISCV_ISA_EXT_F",
		"KVM_RISCV_ISA_EXT_H",
		"KVM_RISCV_ISA_EXT_I",
		"KVM_RISCV_ISA_EXT_M",
		"KVM_RISCV_ISA_EXT_SVPBMT",
		"KVM_RISCV_ISA_EXT_SSTC",
		"KVM_RISCV_ISA_EXT_SVINVAL",
		"KVM_RISCV_ISA_EXT_ZIHINTPAUSE",
		"KVM_RISCV_ISA_EXT_ZICBOM",
		"KVM_RISCV_ISA_EXT_ZICBOZ",
		"KVM_RISCV_ISA_EXT_ZBB",
		"KVM_RISCV_ISA_EXT_SSAIA",
		"KVM_RISCV_ISA_EXT_V",
		"KVM_RISCV_ISA_EXT_SVNAPOT",
		"KVM_RISCV_ISA_EXT_ZBA",
		"KVM_RISCV_ISA_EXT_ZBS",
		"KVM_RISCV_ISA_EXT_ZICNTR",
		"KVM_RISCV_ISA_EXT_ZICSR",
		"KVM_RISCV_ISA_EXT_ZIFENCEI",
		"KVM_RISCV_ISA_EXT_ZIHPM",
	};

	if (reg_off >= ARRAY_SIZE(kvm_isa_ext_reg_name)) {
		/*
		 * isa_ext regs would grow regularly with new isa extension added, so
		 * just show "reg" to indicate a new extension.
		 */
		return strdup_printf("%lld /* UNKNOWN */", reg_off);
	}

	return kvm_isa_ext_reg_name[reg_off];
}

/* Map a single SBI extension id to its symbolic name via a lookup table. */
static const char *sbi_ext_single_id_to_str(__u64 reg_off)
{
	/* reg_off is KVM_RISCV_SBI_EXT_ID */
	static const char * const kvm_sbi_ext_reg_name[] = {
		"KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01",
		"KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME",
		"KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI",
		"KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE",
		"KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST",
		"KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM",
		"KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU",
		"KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL",
		"KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR",
	};

	if (reg_off >= ARRAY_SIZE(kvm_sbi_ext_reg_name)) {
		/*
		 * sbi_ext regs would grow regularly with new sbi extension added, so
		 * just show "reg" to indicate a new extension.
		 */
		return strdup_printf("KVM_REG_RISCV_SBI_SINGLE | %lld /* UNKNOWN */", reg_off);
	}

	return kvm_sbi_ext_reg_name[reg_off];
}

/* Format an SBI multi-word enable/disable reg name from subtype and offset. */
static const char *sbi_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off)
{
	if (reg_off > KVM_REG_RISCV_SBI_MULTI_REG_LAST) {
		/*
		 * sbi_ext regs would grow regularly with new sbi extension added, so
		 * just show "reg" to indicate a new extension.
		 */
		return strdup_printf("%lld /* UNKNOWN */", reg_off);
	}

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_MULTI_EN:
		return strdup_printf("KVM_REG_RISCV_SBI_MULTI_EN | %lld", reg_off);
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return strdup_printf("KVM_REG_RISCV_SBI_MULTI_DIS | %lld", reg_off);
	}

	/* Unreachable via sbi_ext_id_to_str(), which validates the subtype. */
	return NULL;
}

/* Dispatch an SBI_EXT reg id to the single- or multi-reg name decoder. */
static const char *sbi_ext_id_to_str(const char *prefix, __u64 id)
{
	__u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_EXT);
	__u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;

	reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		return sbi_ext_single_id_to_str(reg_off);
	case KVM_REG_RISCV_SBI_MULTI_EN:
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return sbi_ext_multi_id_to_str(reg_subtype, reg_off);
	}

	TEST_FAIL("%s: Unknown sbi ext subtype: 0x%llx", prefix, reg_subtype);
	return NULL;
}

/*
 * Print one register id as a line of C source suitable for pasting into
 * the blessed lists below; fails the test on malformed ids.
 */
void print_reg(const char *prefix, __u64 id)
{
	const char *reg_size = NULL;

	TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_RISCV,
		    "%s: KVM_REG_RISCV missing in reg id: 0x%llx", prefix, id);

	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U32:
		reg_size = "KVM_REG_SIZE_U32";
		break;
	case KVM_REG_SIZE_U64:
		reg_size = "KVM_REG_SIZE_U64";
		break;
	case KVM_REG_SIZE_U128:
		reg_size = "KVM_REG_SIZE_U128";
		break;
	default:
		TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
			  prefix, (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
	}

	switch (id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CONFIG | %s,\n",
		       reg_size, config_id_to_str(id));
		break;
	case KVM_REG_RISCV_CORE:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CORE | %s,\n",
		       reg_size, core_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_CSR:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CSR | %s,\n",
		       reg_size, csr_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_TIMER:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_TIMER | %s,\n",
		       reg_size, timer_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_FP_F:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_F | %s,\n",
		       reg_size, fp_f_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_FP_D:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_D | %s,\n",
		       reg_size, fp_d_id_to_str(prefix, id));
		break;
	case KVM_REG_RISCV_ISA_EXT:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_ISA_EXT | %s,\n",
		       reg_size, isa_ext_id_to_str(id));
		break;
	case KVM_REG_RISCV_SBI_EXT:
		printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_EXT | %s,\n",
		       reg_size, sbi_ext_id_to_str(prefix, id));
		break;
	default:
		TEST_FAIL("%s: Unexpected reg type: 0x%llx in reg id: 0x%llx", prefix,
			  (id & KVM_REG_RISCV_TYPE_MASK) >> KVM_REG_RISCV_TYPE_SHIFT, id);
	}
}

/*
 * The current blessed list was primed with the output of kernel version
 * v6.5-rc3 and then later updated with new registers.
 */
static __u64 base_regs[] = {
	/* CONFIG pseudo registers */
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mvendorid),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(marchid),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mimpid),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(satp_mode),
	/* CORE general-purpose registers, pc, and privilege mode */
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.ra),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.sp),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.gp),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.tp),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t0),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t1),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t2),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s0),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s1),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a0),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a1),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a2),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a3),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a4),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a5),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a6),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a7),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s2),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s3),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s4),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s5),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s6),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s7),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s8),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s9),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s10),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s11),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t3),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t4),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t5),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t6),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(mode),
	/* General CSRs */
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sstatus),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sie),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stvec),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sscratch),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sepc),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scause),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stval),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sip),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(satp),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scounteren),
	/* Timer registers (always 64-bit) */
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
	/* SBI extension enable/disable pseudo registers */
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_EN | 0,
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_DIS | 0,
};

/*
 * The skips_set list registers that should skip set test.
 * - KVM_REG_RISCV_TIMER_REG(state): set would fail if it was not initialized properly.
 */
static __u64 base_skips_set[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
};

/* Extra registers expected when the H extension is enabled. */
static __u64 h_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H,
};

/* Zicbom adds its CONFIG block-size pseudo reg alongside the ISA_EXT reg. */
static __u64 zicbom_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM,
};

/* Zicboz adds its CONFIG block-size pseudo reg alongside the ISA_EXT reg. */
static __u64 zicboz_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ,
};

static __u64 svpbmt_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT,
};

static __u64 sstc_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC,
};

static __u64 svinval_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL,
};

static __u64 zihintpause_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE,
};

static __u64 zba_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA,
};

static __u64 zbb_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB,
};

static __u64 zbs_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS,
};

static __u64 zicntr_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR,
};

static __u64 zicsr_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICSR,
};

static __u64 zifencei_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI,
};

static __u64 zihpm_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM,
};

/* Ssaia adds the AIA CSRs alongside the ISA_EXT reg. */
static __u64 aia_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA,
};

/* Single-precision FP state: f[0..31] and fcsr are 32-bit. */
static __u64 fp_f_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[0]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[1]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[2]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[3]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[4]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[5]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[6]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[7]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[8]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[9]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[10]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[11]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[12]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[13]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[14]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[15]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[16]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[17]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[18]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[19]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[20]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[21]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[22]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[23]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[24]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[25]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[26]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[27]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[28]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[29]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[30]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[31]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(fcsr),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F,
};

/* Double-precision FP state: f[0..31] are 64-bit, fcsr stays 32-bit. */
static __u64 fp_d_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[0]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[1]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[2]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[3]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[4]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[5]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[6]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[7]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[8]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[9]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[10]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[11]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[12]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[13]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[14]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[15]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[16]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[17]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[18]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[19]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[20]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[21]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[22]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[23]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[24]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[25]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[26]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[27]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[28]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[29]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[30]),
	KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[31]),
	KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(fcsr),
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D,
};

/* Sublist builders: name, gating feature (if any), and the reg table. */
#define BASE_SUBLIST \
	{"base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), \
	 .skips_set = base_skips_set, .skips_set_n = ARRAY_SIZE(base_skips_set),}
#define H_REGS_SUBLIST \
	{"h", .feature = KVM_RISCV_ISA_EXT_H, .regs = h_regs, .regs_n = ARRAY_SIZE(h_regs),}
#define ZICBOM_REGS_SUBLIST \
	{"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, .regs_n = ARRAY_SIZE(zicbom_regs),}
#define ZICBOZ_REGS_SUBLIST \
	{"zicboz", .feature = KVM_RISCV_ISA_EXT_ZICBOZ, .regs = zicboz_regs, .regs_n = ARRAY_SIZE(zicboz_regs),}
#define SVPBMT_REGS_SUBLIST \
	{"svpbmt", .feature = KVM_RISCV_ISA_EXT_SVPBMT, .regs = svpbmt_regs, .regs_n = ARRAY_SIZE(svpbmt_regs),}
#define SSTC_REGS_SUBLIST \
	{"sstc", .feature = KVM_RISCV_ISA_EXT_SSTC, .regs = sstc_regs, .regs_n = ARRAY_SIZE(sstc_regs),}
#define SVINVAL_REGS_SUBLIST \
	{"svinval", .feature = KVM_RISCV_ISA_EXT_SVINVAL, .regs = svinval_regs, .regs_n = ARRAY_SIZE(svinval_regs),}
#define ZIHINTPAUSE_REGS_SUBLIST \
	{"zihintpause", .feature = KVM_RISCV_ISA_EXT_ZIHINTPAUSE, .regs = zihintpause_regs, .regs_n = ARRAY_SIZE(zihintpause_regs),}
#define ZBA_REGS_SUBLIST \
	{"zba", .feature = KVM_RISCV_ISA_EXT_ZBA, .regs = zba_regs, .regs_n = ARRAY_SIZE(zba_regs),}
#define ZBB_REGS_SUBLIST \
	{"zbb", .feature = KVM_RISCV_ISA_EXT_ZBB, .regs = zbb_regs, .regs_n = ARRAY_SIZE(zbb_regs),}
#define ZBS_REGS_SUBLIST \
	{"zbs", .feature = KVM_RISCV_ISA_EXT_ZBS, .regs = zbs_regs, .regs_n = ARRAY_SIZE(zbs_regs),}
#define ZICNTR_REGS_SUBLIST \
	{"zicntr", .feature = KVM_RISCV_ISA_EXT_ZICNTR, .regs = zicntr_regs, .regs_n = ARRAY_SIZE(zicntr_regs),}
#define ZICSR_REGS_SUBLIST \
	{"zicsr", .feature = KVM_RISCV_ISA_EXT_ZICSR, .regs = zicsr_regs, .regs_n = ARRAY_SIZE(zicsr_regs),}
#define ZIFENCEI_REGS_SUBLIST \
	{"zifencei", .feature = KVM_RISCV_ISA_EXT_ZIFENCEI, .regs = zifencei_regs, .regs_n = ARRAY_SIZE(zifencei_regs),}
#define ZIHPM_REGS_SUBLIST \
	{"zihpm", .feature = KVM_RISCV_ISA_EXT_ZIHPM, .regs = zihpm_regs, .regs_n = ARRAY_SIZE(zihpm_regs),}
#define AIA_REGS_SUBLIST \
	{"aia", .feature = KVM_RISCV_ISA_EXT_SSAIA, .regs = aia_regs, .regs_n = ARRAY_SIZE(aia_regs),}
#define FP_F_REGS_SUBLIST \
	{"fp_f", .feature = KVM_RISCV_ISA_EXT_F, .regs = fp_f_regs, \
	 .regs_n = ARRAY_SIZE(fp_f_regs),}
#define FP_D_REGS_SUBLIST \
	{"fp_d", .feature = KVM_RISCV_ISA_EXT_D, .regs = fp_d_regs, \
	 .regs_n = ARRAY_SIZE(fp_d_regs),}

/* One test config per extension: the base sublist plus that extension's. */
static struct vcpu_reg_list h_config = {
	.sublists = {
	BASE_SUBLIST,
	H_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list zicbom_config = {
	.sublists = {
	BASE_SUBLIST,
	ZICBOM_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list zicboz_config = {
	.sublists = {
	BASE_SUBLIST,
	ZICBOZ_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list svpbmt_config = {
	.sublists = {
	BASE_SUBLIST,
	SVPBMT_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list sstc_config = {
	.sublists = {
	BASE_SUBLIST,
	SSTC_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list svinval_config = {
	.sublists = {
	BASE_SUBLIST,
	SVINVAL_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list zihintpause_config = {
	.sublists = {
	BASE_SUBLIST,
	ZIHINTPAUSE_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list zba_config = {
	.sublists = {
	BASE_SUBLIST,
	ZBA_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list zbb_config = {
	.sublists = {
	BASE_SUBLIST,
	ZBB_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list zbs_config = {
816 .sublists = { 817 BASE_SUBLIST, 818 ZBS_REGS_SUBLIST, 819 {0}, 820 }, 821 }; 822 823 static struct vcpu_reg_list zicntr_config = { 824 .sublists = { 825 BASE_SUBLIST, 826 ZICNTR_REGS_SUBLIST, 827 {0}, 828 }, 829 }; 830 831 static struct vcpu_reg_list zicsr_config = { 832 .sublists = { 833 BASE_SUBLIST, 834 ZICSR_REGS_SUBLIST, 835 {0}, 836 }, 837 }; 838 839 static struct vcpu_reg_list zifencei_config = { 840 .sublists = { 841 BASE_SUBLIST, 842 ZIFENCEI_REGS_SUBLIST, 843 {0}, 844 }, 845 }; 846 847 static struct vcpu_reg_list zihpm_config = { 848 .sublists = { 849 BASE_SUBLIST, 850 ZIHPM_REGS_SUBLIST, 851 {0}, 852 }, 853 }; 854 855 static struct vcpu_reg_list aia_config = { 856 .sublists = { 857 BASE_SUBLIST, 858 AIA_REGS_SUBLIST, 859 {0}, 860 }, 861 }; 862 863 static struct vcpu_reg_list fp_f_config = { 864 .sublists = { 865 BASE_SUBLIST, 866 FP_F_REGS_SUBLIST, 867 {0}, 868 }, 869 }; 870 871 static struct vcpu_reg_list fp_d_config = { 872 .sublists = { 873 BASE_SUBLIST, 874 FP_D_REGS_SUBLIST, 875 {0}, 876 }, 877 }; 878 879 struct vcpu_reg_list *vcpu_configs[] = { 880 &h_config, 881 &zicbom_config, 882 &zicboz_config, 883 &svpbmt_config, 884 &sstc_config, 885 &svinval_config, 886 &zihintpause_config, 887 &zba_config, 888 &zbb_config, 889 &zbs_config, 890 &zicntr_config, 891 &zicsr_config, 892 &zifencei_config, 893 &zihpm_config, 894 &aia_config, 895 &fp_f_config, 896 &fp_d_config, 897 }; 898 int vcpu_configs_n = ARRAY_SIZE(vcpu_configs); 899