// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>
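
/*
 * SBI HSM HART_START: bring a stopped target vCPU online. Per the SBI
 * calling convention, a0 carries the target hartid, a1 the address the
 * hart starts executing at, and a2 an opaque value that is handed to
 * the started hart in its a1 register.
 */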
static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *reset_cntx;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	struct kvm_vcpu *target_vcpu;
	unsigned long target_vcpuid = cp->a0;
	int ret = 0;

	target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
	if (!target_vcpu)
		return SBI_ERR_INVALID_PARAM;

	spin_lock(&target_vcpu->arch.mp_state_lock);
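
	/*
	 * Per the SBI spec, starting a hart that is not in the STOPPED
	 * state fails with SBI_ERR_ALREADY_AVAILABLE.
	 */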
	if (!kvm_riscv_vcpu_stopped(target_vcpu)) {
		ret = SBI_ERR_ALREADY_AVAILABLE;
		goto out;
	}

	reset_cntx = &target_vcpu->arch.guest_reset_context;
	/* start address */
	reset_cntx->sepc = cp->a1;
	/* target vcpu id to start */
	reset_cntx->a0 = target_vcpuid;
	/* private data passed from kernel */
	reset_cntx->a1 = cp->a2;
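	/* Apply the reset context before the target next enters the guest */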
	kvm_make_request(KVM_REQ_VCPU_RESET, target_vcpu);

	__kvm_riscv_vcpu_power_on(target_vcpu);

out:
	spin_unlock(&target_vcpu->arch.mp_state_lock);

	return ret;
}
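
/*
 * SBI HSM HART_STOP: power off the calling vCPU. Stopping a hart that
 * is already stopped fails with SBI_ERR_FAILURE.
 */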
static int kvm_sbi_hsm_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int ret = 0;

	spin_lock(&vcpu->arch.mp_state_lock);

	if (kvm_riscv_vcpu_stopped(vcpu)) {
		ret = SBI_ERR_FAILURE;
		goto out;
	}

	__kvm_riscv_vcpu_power_off(vcpu);

out:
	spin_unlock(&vcpu->arch.mp_state_lock);

	return ret;
}
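
/*
 * SBI HSM HART_GET_STATUS: report the target hart's state. A stopped
 * vCPU is STOPPED, a blocked (e.g. WFI-waiting) one is SUSPENDED, and
 * anything else is STARTED. No lock is taken: the reported state is
 * advisory and may be stale by the time the caller sees it.
 */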
static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long target_vcpuid = cp->a0;
	struct kvm_vcpu *target_vcpu;

	target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
	if (!target_vcpu)
		return SBI_ERR_INVALID_PARAM;
	if (kvm_riscv_vcpu_stopped(target_vcpu))
		return SBI_HSM_STATE_STOPPED;
	else if (target_vcpu->stat.generic.blocking)
		return SBI_HSM_STATE_SUSPENDED;
	else
		return SBI_HSM_STATE_STARTED;
}
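
/*
 * Dispatch SBI HSM calls: the function ID arrives in a6 and arguments
 * in a0-a2, per the SBI calling convention. Results go back to the
 * guest through retdata; HSM state values are non-negative, SBI error
 * codes negative.
 */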
static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   struct kvm_vcpu_sbi_return *retdata)
{
	int ret = 0;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long funcid = cp->a6;

	switch (funcid) {
	case SBI_EXT_HSM_HART_START:
		ret = kvm_sbi_hsm_vcpu_start(vcpu);
		break;
	case SBI_EXT_HSM_HART_STOP:
		ret = kvm_sbi_hsm_vcpu_stop(vcpu);
		break;
	case SBI_EXT_HSM_HART_STATUS:
		ret = kvm_sbi_hsm_vcpu_get_status(vcpu);
		if (ret >= 0) {
			retdata->out_val = ret;
			retdata->err_val = 0;
		}
		return 0;
	case SBI_EXT_HSM_HART_SUSPEND:
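		/*
		 * Retentive suspend is modelled as guest WFI; KVM does
		 * not implement non-retentive suspend.
		 */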
		switch (cp->a0) {
		case SBI_HSM_SUSPEND_RET_DEFAULT:
			kvm_riscv_vcpu_wfi(vcpu);
			break;
		case SBI_HSM_SUSPEND_NON_RET_DEFAULT:
			ret = SBI_ERR_NOT_SUPPORTED;
			break;
		default:
			ret = SBI_ERR_INVALID_PARAM;
		}
		break;
	default:
		ret = SBI_ERR_NOT_SUPPORTED;
	}

	retdata->err_val = ret;

	return 0;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm = {
	.extid_start = SBI_EXT_HSM,
	.extid_end = SBI_EXT_HSM,
	.handler = kvm_sbi_ext_hsm_handler,
};