// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */
899cdc6c1SAnup Patel
999cdc6c1SAnup Patel #include <linux/errno.h>
1099cdc6c1SAnup Patel #include <linux/err.h>
1199cdc6c1SAnup Patel #include <linux/module.h>
1299cdc6c1SAnup Patel #include <linux/kvm_host.h>
1399cdc6c1SAnup Patel #include <asm/csr.h>
1499cdc6c1SAnup Patel #include <asm/hwcap.h>
1599cdc6c1SAnup Patel #include <asm/sbi.h>
1699cdc6c1SAnup Patel
kvm_arch_dev_ioctl(struct file * filp,unsigned int ioctl,unsigned long arg)1799cdc6c1SAnup Patel long kvm_arch_dev_ioctl(struct file *filp,
1899cdc6c1SAnup Patel unsigned int ioctl, unsigned long arg)
1999cdc6c1SAnup Patel {
2099cdc6c1SAnup Patel return -EINVAL;
2199cdc6c1SAnup Patel }
2299cdc6c1SAnup Patel
kvm_arch_hardware_enable(void)2399cdc6c1SAnup Patel int kvm_arch_hardware_enable(void)
2499cdc6c1SAnup Patel {
2599cdc6c1SAnup Patel unsigned long hideleg, hedeleg;
2699cdc6c1SAnup Patel
2799cdc6c1SAnup Patel hedeleg = 0;
2899cdc6c1SAnup Patel hedeleg |= (1UL << EXC_INST_MISALIGNED);
2999cdc6c1SAnup Patel hedeleg |= (1UL << EXC_BREAKPOINT);
3099cdc6c1SAnup Patel hedeleg |= (1UL << EXC_SYSCALL);
3199cdc6c1SAnup Patel hedeleg |= (1UL << EXC_INST_PAGE_FAULT);
3299cdc6c1SAnup Patel hedeleg |= (1UL << EXC_LOAD_PAGE_FAULT);
3399cdc6c1SAnup Patel hedeleg |= (1UL << EXC_STORE_PAGE_FAULT);
3499cdc6c1SAnup Patel csr_write(CSR_HEDELEG, hedeleg);
3599cdc6c1SAnup Patel
3699cdc6c1SAnup Patel hideleg = 0;
3799cdc6c1SAnup Patel hideleg |= (1UL << IRQ_VS_SOFT);
3899cdc6c1SAnup Patel hideleg |= (1UL << IRQ_VS_TIMER);
3999cdc6c1SAnup Patel hideleg |= (1UL << IRQ_VS_EXT);
4099cdc6c1SAnup Patel csr_write(CSR_HIDELEG, hideleg);
4199cdc6c1SAnup Patel
42f04bafb5SAtish Patra /* VS should access only the time counter directly. Everything else should trap */
43f04bafb5SAtish Patra csr_write(CSR_HCOUNTEREN, 0x02);
4499cdc6c1SAnup Patel
4599cdc6c1SAnup Patel csr_write(CSR_HVIP, 0);
4699cdc6c1SAnup Patel
4754e43320SAnup Patel kvm_riscv_aia_enable();
4854e43320SAnup Patel
4999cdc6c1SAnup Patel return 0;
5099cdc6c1SAnup Patel }
5199cdc6c1SAnup Patel
kvm_arch_hardware_disable(void)5299cdc6c1SAnup Patel void kvm_arch_hardware_disable(void)
5399cdc6c1SAnup Patel {
5454e43320SAnup Patel kvm_riscv_aia_disable();
5554e43320SAnup Patel
5633e5b574SVincent Chen /*
5733e5b574SVincent Chen * After clearing the hideleg CSR, the host kernel will receive
5833e5b574SVincent Chen * spurious interrupts if hvip CSR has pending interrupts and the
5933e5b574SVincent Chen * corresponding enable bits in vsie CSR are asserted. To avoid it,
6033e5b574SVincent Chen * hvip CSR and vsie CSR must be cleared before clearing hideleg CSR.
6133e5b574SVincent Chen */
6233e5b574SVincent Chen csr_write(CSR_VSIE, 0);
6333e5b574SVincent Chen csr_write(CSR_HVIP, 0);
6499cdc6c1SAnup Patel csr_write(CSR_HEDELEG, 0);
6599cdc6c1SAnup Patel csr_write(CSR_HIDELEG, 0);
6699cdc6c1SAnup Patel }
6799cdc6c1SAnup Patel
riscv_kvm_init(void)6820deee32SSean Christopherson static int __init riscv_kvm_init(void)
6920deee32SSean Christopherson {
7054e43320SAnup Patel int rc;
719d05c1feSAnup Patel const char *str;
729d05c1feSAnup Patel
7399cdc6c1SAnup Patel if (!riscv_isa_extension_available(NULL, h)) {
7499cdc6c1SAnup Patel kvm_info("hypervisor extension not available\n");
7599cdc6c1SAnup Patel return -ENODEV;
7699cdc6c1SAnup Patel }
7799cdc6c1SAnup Patel
7899cdc6c1SAnup Patel if (sbi_spec_is_0_1()) {
7999cdc6c1SAnup Patel kvm_info("require SBI v0.2 or higher\n");
8099cdc6c1SAnup Patel return -ENODEV;
8199cdc6c1SAnup Patel }
8299cdc6c1SAnup Patel
83*41cad828SAndrew Jones if (!sbi_probe_extension(SBI_EXT_RFENCE)) {
8499cdc6c1SAnup Patel kvm_info("require SBI RFENCE extension\n");
8599cdc6c1SAnup Patel return -ENODEV;
8699cdc6c1SAnup Patel }
8799cdc6c1SAnup Patel
8826708234SAnup Patel kvm_riscv_gstage_mode_detect();
899d05c1feSAnup Patel
9026708234SAnup Patel kvm_riscv_gstage_vmid_detect();
91fd7bb4a2SAnup Patel
9254e43320SAnup Patel rc = kvm_riscv_aia_init();
9354e43320SAnup Patel if (rc && rc != -ENODEV)
9454e43320SAnup Patel return rc;
9554e43320SAnup Patel
9699cdc6c1SAnup Patel kvm_info("hypervisor extension available\n");
9799cdc6c1SAnup Patel
9826708234SAnup Patel switch (kvm_riscv_gstage_mode()) {
999d05c1feSAnup Patel case HGATP_MODE_SV32X4:
1009d05c1feSAnup Patel str = "Sv32x4";
1019d05c1feSAnup Patel break;
1029d05c1feSAnup Patel case HGATP_MODE_SV39X4:
1039d05c1feSAnup Patel str = "Sv39x4";
1049d05c1feSAnup Patel break;
1059d05c1feSAnup Patel case HGATP_MODE_SV48X4:
1069d05c1feSAnup Patel str = "Sv48x4";
1079d05c1feSAnup Patel break;
108b4bbb95eSAnup Patel case HGATP_MODE_SV57X4:
109b4bbb95eSAnup Patel str = "Sv57x4";
110b4bbb95eSAnup Patel break;
1119d05c1feSAnup Patel default:
1129d05c1feSAnup Patel return -ENODEV;
1139d05c1feSAnup Patel }
1149d05c1feSAnup Patel kvm_info("using %s G-stage page table format\n", str);
1159d05c1feSAnup Patel
11626708234SAnup Patel kvm_info("VMID %ld bits available\n", kvm_riscv_gstage_vmid_bits());
117fd7bb4a2SAnup Patel
11854e43320SAnup Patel if (kvm_riscv_aia_available())
11954e43320SAnup Patel kvm_info("AIA available with %d guest external interrupts\n",
12054e43320SAnup Patel kvm_riscv_aia_nr_hgei);
12154e43320SAnup Patel
12254e43320SAnup Patel rc = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
12354e43320SAnup Patel if (rc) {
12454e43320SAnup Patel kvm_riscv_aia_exit();
12554e43320SAnup Patel return rc;
12654e43320SAnup Patel }
12754e43320SAnup Patel
12899cdc6c1SAnup Patel return 0;
12999cdc6c1SAnup Patel }
130e78a1117SXiakaiPan module_init(riscv_kvm_init);
131e78a1117SXiakaiPan
riscv_kvm_exit(void)132e78a1117SXiakaiPan static void __exit riscv_kvm_exit(void)
13354e43320SAnup Patel {
13454e43320SAnup Patel kvm_riscv_aia_exit();
135e78a1117SXiakaiPan
136e78a1117SXiakaiPan kvm_exit();
137e78a1117SXiakaiPan }
138 module_exit(riscv_kvm_exit);
139