// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_pmu.h>
#include <asm/kvm_vcpu_sbi.h>

static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				    struct kvm_vcpu_sbi_return *retdata)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	u64 next_cycle;

	if (cp->a6 != SBI_EXT_TIME_SET_TIMER) {
		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
		return 0;
	}

	kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_SET_TIMER);
#if __riscv_xlen == 32
	/* On RV32, the 64-bit cycle value is passed in a1 (high) and a0 (low) */
	next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
#else
	next_cycle = (u64)cp->a0;
#endif
	kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);

	return 0;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time = {
	.extid_start = SBI_EXT_TIME,
	.extid_end = SBI_EXT_TIME,
	.handler = kvm_sbi_ext_time_handler,
};

static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   struct kvm_vcpu_sbi_return *retdata)
{
	int ret = 0;
	unsigned long i;
	struct kvm_vcpu *tmp;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long hmask = cp->a0;
	unsigned long hbase = cp->a1;
	unsigned long hart_bit = 0, sentmask = 0;

	if (cp->a6 != SBI_EXT_IPI_SEND_IPI) {
		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
		return 0;
	}

	kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_IPI_SENT);
	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		/* A hart mask base of -1UL selects all available harts */
		if (hbase != -1UL) {
			if (tmp->vcpu_id < hbase)
				continue;
			hart_bit = tmp->vcpu_id - hbase;
			if (hart_bit >= __riscv_xlen)
				goto done;
			if (!(hmask & (1UL << hart_bit)))
				continue;
		}
		ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
		if (ret < 0)
			break;
		sentmask |= 1UL << hart_bit;
		kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD);
	}

done:
	/* Fail if the requested hart mask and the delivered mask differ */
	if (hbase != -1UL && (hmask ^ sentmask))
		retdata->err_val = SBI_ERR_INVALID_PARAM;

	return ret;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi = {
	.extid_start = SBI_EXT_IPI,
	.extid_end = SBI_EXT_IPI,
	.handler = kvm_sbi_ext_ipi_handler,
};

static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				      struct kvm_vcpu_sbi_return *retdata)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long hmask = cp->a0;
	unsigned long hbase = cp->a1;
	unsigned long funcid = cp->a6;

	switch (funcid) {
	case SBI_EXT_RFENCE_REMOTE_FENCE_I:
		kvm_riscv_fence_i(vcpu->kvm, hbase, hmask);
		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
		if (cp->a2 == 0 && cp->a3 == 0)
			kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
		else
			kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
						  cp->a2, cp->a3, PAGE_SHIFT);
		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
		if (cp->a2 == 0 && cp->a3 == 0)
			kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
						       hbase, hmask, cp->a4);
		else
			kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm,
						       hbase, hmask,
						       cp->a2, cp->a3,
						       PAGE_SHIFT, cp->a4);
		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_SENT);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
		/*
		 * Until nested virtualization is implemented, the
		 * SBI HFENCE calls should be treated as NOPs
		 */
		break;
	default:
		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
	}

	return 0;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence = {
	.extid_start = SBI_EXT_RFENCE,
	.extid_end = SBI_EXT_RFENCE,
	.handler = kvm_sbi_ext_rfence_handler,
};

static int kvm_sbi_ext_srst_handler(struct kvm_vcpu *vcpu,
				    struct kvm_run *run,
				    struct kvm_vcpu_sbi_return *retdata)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long funcid = cp->a6;
	u32 reason = cp->a1;
	u32 type = cp->a0;

	switch (funcid) {
	case SBI_EXT_SRST_RESET:
		switch (type) {
		case SBI_SRST_RESET_TYPE_SHUTDOWN:
			kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
							KVM_SYSTEM_EVENT_SHUTDOWN,
							reason);
			retdata->uexit = true;
			break;
		case SBI_SRST_RESET_TYPE_COLD_REBOOT:
		case SBI_SRST_RESET_TYPE_WARM_REBOOT:
			kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
							KVM_SYSTEM_EVENT_RESET,
							reason);
			retdata->uexit = true;
			break;
		default:
			retdata->err_val = SBI_ERR_NOT_SUPPORTED;
		}
		break;
	default:
		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
	}

	return 0;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst = {
	.extid_start = SBI_EXT_SRST,
	.extid_end = SBI_EXT_SRST,
	.handler = kvm_sbi_ext_srst_handler,
};