// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2023 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>

#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)		\
[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	/* Single letter extensions (alphabetically sorted) */
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
	/* Multi letter extensions (alphabetically sorted) */
	KVM_ISA_EXT_ARR(SSAIA),
	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVNAPOT),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(ZBA),
	KVM_ISA_EXT_ARR(ZBB),
	KVM_ISA_EXT_ARR(ZBS),
	KVM_ISA_EXT_ARR(ZICBOM),
	KVM_ISA_EXT_ARR(ZICBOZ),
	KVM_ISA_EXT_ARR(ZICNTR),
	KVM_ISA_EXT_ARR(ZICSR),
	KVM_ISA_EXT_ARR(ZIFENCEI),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZIHPM),
};

static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}

static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	case KVM_RISCV_ISA_EXT_V:
		return riscv_v_vstate_ctrl_user_allowed();
	default:
		break;
	}

	return true;
}

static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	case KVM_RISCV_ISA_EXT_SSAIA:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_SVNAPOT:
	case KVM_RISCV_ISA_EXT_ZBA:
	case KVM_RISCV_ISA_EXT_ZBB:
	case KVM_RISCV_ISA_EXT_ZBS:
	case KVM_RISCV_ISA_EXT_ZICNTR:
	case KVM_RISCV_ISA_EXT_ZICSR:
	case KVM_RISCV_ISA_EXT_ZIFENCEI:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_RISCV_ISA_EXT_ZIHPM:
		return false;
	default:
		break;
	}

	return true;
}

void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
{
	unsigned long host_isa, i;

	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		host_isa = kvm_isa_ext_arr[i];
		if (__riscv_isa_extension_available(NULL, host_isa) &&
		    kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(host_isa, vcpu->arch.isa);
	}
}

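/*
 * CONFIG pseudo-registers expose vCPU configuration to userspace: the
 * base ISA bitmap, the Zicbom/Zicboz cache-block sizes, the machine ID
 * CSRs (mvendorid, marchid, mimpid) and the host satp mode. reg_num is
 * the word index of the field within struct kvm_riscv_config (see
 * KVM_REG_RISCV_CONFIG_REG()).
 */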
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		reg_val = riscv_cboz_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		reg_val = satp_mode >> SATP_MODE_SHIFT;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		/*
		 * Return early (i.e. do nothing) if reg_val is the same
		 * value retrievable via kvm_riscv_vcpu_get_reg_config().
		 */
		if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
			break;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EBUSY;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -ENOENT;
		if (reg_val != riscv_cbom_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -ENOENT;
		if (reg_val != riscv_cboz_block_size)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		if (reg_val == vcpu->arch.mvendorid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mvendorid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		if (reg_val == vcpu->arch.marchid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.marchid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		if (reg_val == vcpu->arch.mimpid)
			break;
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mimpid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(satp_mode):
		if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
			return -EINVAL;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

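/*
 * CORE registers mirror struct kvm_riscv_core: the GPRs come straight
 * from the saved guest_context, pc maps to sepc, and the privilege mode
 * is derived from the SPP bit of the guest sstatus.
 */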
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
			  KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -ENOENT;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -ENOENT;

	return 0;
}

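/*
 * General CSR accessors: fields of struct kvm_riscv_csr are accessed by
 * word index, except sip which is synthesized from the shadowed hvip
 * (pending interrupts are flushed before a read, and a write clears the
 * software-pending interrupt mask).
 */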
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long *out_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
	} else
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long reg_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -ENOENT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

	return 0;
}

static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
		break;
	default:
		rc = -ENOENT;
		break;
	}
	if (rc)
		return rc;

	return 0;
}

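/*
 * ISA_EXT registers: a SINGLE register reports or toggles one extension
 * (value 0 or 1), while a MULTI register packs up to BITS_PER_LONG
 * extensions per register index. The ISA bitmap may only be changed
 * before the vCPU has run for the first time.
 */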
static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	*reg_val = 0;
	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
		*reg_val = 1; /* Mark the given extension as available */

	return 0;
}

static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long host_isa_ext;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -ENOENT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -ENOENT;

	if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
		return 0;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(host_isa_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(host_isa_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EBUSY;
	}

	return 0;
}

static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
	}

	return 0;
}

static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
			break;

		riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_ISA_MULTI_EN:
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_ISA_SINGLE:
		return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_ISA_MULTI_EN:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}

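/*
 * Helpers for KVM_GET_REG_LIST: each copy_*_reg_indices() below counts
 * registers when uindices is NULL and otherwise writes the corresponding
 * KVM_GET/SET_ONE_REG identifiers to userspace, so counting and copying
 * always stay in sync.
 */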
static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
				   u64 __user *uindices)
{
	int n = 0;

	for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
		 i++) {
		u64 size;
		u64 reg;

		/*
		 * Avoid reporting config reg if the corresponding extension
		 * was not available.
		 */
		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
		    !riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			continue;
		else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
			 !riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			continue;

		size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
{
	return copy_config_reg_indices(vcpu, NULL);
}

static inline unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
}

static int copy_core_reg_indices(u64 __user *uindices)
{
	int n = num_core_regs();

	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
{
	unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);

	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

	return n;
}

static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
	int n2 = 0;

	/* copy general csr regs */
	for (int i = 0; i < n1; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
			  KVM_REG_RISCV_CSR_GENERAL | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy AIA csr regs */
	if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

		for (int i = 0; i < n2; i++) {
			u64 size = IS_ENABLED(CONFIG_32BIT) ?
				   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
				  KVM_REG_RISCV_CSR_AIA | i;

			if (uindices) {
				if (put_user(reg, uindices))
					return -EFAULT;
				uindices++;
			}
		}
	}

	return n1 + n2;
}

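/* Timer registers are always 64-bit, even on 32-bit hosts. */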
static inline unsigned long num_timer_regs(void)
{
	return sizeof(struct kvm_riscv_timer) / sizeof(u64);
}

static int copy_timer_reg_indices(u64 __user *uindices)
{
	int n = num_timer_regs();

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			  KVM_REG_RISCV_TIMER | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, f))
		return sizeof(cntx->fp.f) / sizeof(u32);
	else
		return 0;
}

static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	int n = num_fp_f_regs(vcpu);

	for (int i = 0; i < n; i++) {
		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
			  KVM_REG_RISCV_FP_F | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return n;
}

static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
{
	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	if (riscv_isa_extension_available(vcpu->arch.isa, d))
		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
	else
		return 0;
}

static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	int i;
	int n = num_fp_d_regs(vcpu);
	u64 reg;

	/* copy fp.d.f indices */
	for (i = 0; i < n-1; i++) {
		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_FP_D | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy fp.d.fcsr indices */
	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
	if (uindices) {
		if (put_user(reg, uindices))
			return -EFAULT;
		uindices++;
	}

	return n;
}

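/*
 * Only ISA_EXT registers for extensions present in the vCPU ISA bitmap
 * are reported to userspace.
 */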
static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	unsigned int n = 0;
	unsigned long isa_ext;

	for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;

		isa_ext = kvm_isa_ext_arr[i];
		if (!__riscv_isa_extension_available(vcpu->arch.isa, isa_ext))
			continue;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
{
	return copy_isa_ext_reg_indices(vcpu, NULL);
}

static inline unsigned long num_sbi_ext_regs(void)
{
	/*
	 * number of KVM_REG_RISCV_SBI_SINGLE +
	 * 2 x (number of KVM_REG_RISCV_SBI_MULTI)
	 */
	return KVM_RISCV_SBI_EXT_MAX + 2*(KVM_REG_RISCV_SBI_MULTI_REG_LAST+1);
}

static int copy_sbi_ext_reg_indices(u64 __user *uindices)
{
	int n;

	/* copy KVM_REG_RISCV_SBI_SINGLE */
	n = KVM_RISCV_SBI_EXT_MAX;
	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			  KVM_REG_RISCV_SBI_SINGLE | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	/* copy KVM_REG_RISCV_SBI_MULTI */
	n = KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1;
	for (int i = 0; i < n; i++) {
		u64 size = IS_ENABLED(CONFIG_32BIT) ?
			   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
		u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
			  KVM_REG_RISCV_SBI_MULTI_EN | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
		      KVM_REG_RISCV_SBI_MULTI_DIS | i;

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}
	}

	return num_sbi_ext_regs();
}

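/*
 * Illustrative userspace flow (not part of this file): the indices
 * produced above are typically consumed via the KVM_GET_REG_LIST ioctl,
 * roughly:
 *
 *	struct kvm_reg_list *list = ...;   // list->n set to the capacity
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
 *	// list->reg[0..list->n-1] now holds IDs for KVM_GET/SET_ONE_REG
 */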
/*
 * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_config_regs(vcpu);
	res += num_core_regs();
	res += num_csr_regs(vcpu);
	res += num_timer_regs();
	res += num_fp_f_regs(vcpu);
	res += num_fp_d_regs(vcpu);
	res += num_isa_ext_regs(vcpu);
	res += num_sbi_ext_regs();

	return res;
}

/*
 * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
 */
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices)
{
	int ret;

	ret = copy_config_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_core_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_csr_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_timer_reg_indices(uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_f_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_fp_d_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_isa_ext_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sbi_ext_reg_indices(uindices);
	if (ret < 0)
		return ret;

	return 0;
}

int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}

int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
	default:
		break;
	}

	return -ENOENT;
}