// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

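/*
 * When SBI v0.1 support is compiled out, install a placeholder entry so
 * that the sbi_ext[] table below keeps the same shape. The NULL handler
 * causes kvm_riscv_vcpu_sbi_ecall() to report SBI_ERR_NOT_SUPPORTED.
 */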
#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

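/*
 * The PMU extension is only available when the kernel is built with SBI
 * PMU support; its real implementation lives in KVM's RISC-V PMU code.
 * Without it, fall back to the same kind of inert placeholder as above.
 */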
#ifdef CONFIG_RISCV_PMU_SBI
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu;
#else
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

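/* Table of SBI extensions handled by KVM, searched by kvm_vcpu_sbi_find_ext() */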
static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
	&vcpu_sbi_ext_v01,
	&vcpu_sbi_ext_base,
	&vcpu_sbi_ext_time,
	&vcpu_sbi_ext_ipi,
	&vcpu_sbi_ext_rfence,
	&vcpu_sbi_ext_srst,
	&vcpu_sbi_ext_hsm,
	&vcpu_sbi_ext_pmu,
	&vcpu_sbi_ext_experimental,
	&vcpu_sbi_ext_vendor,
};

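/*
 * Forward an SBI call to userspace for emulation. Per the SBI calling
 * convention, the guest passes the extension ID in a7, the function ID
 * in a6, and up to six arguments in a0-a5; the call returns an error
 * code in a0 and a value in a1. Those are exactly the registers copied
 * into the run structure below.
 */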
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	vcpu->arch.sbi_context.return_handled = 0;
	vcpu->stat.ecall_exit_stat++;
	run->exit_reason = KVM_EXIT_RISCV_SBI;
	run->riscv_sbi.extension_id = cp->a7;
	run->riscv_sbi.function_id = cp->a6;
	run->riscv_sbi.args[0] = cp->a0;
	run->riscv_sbi.args[1] = cp->a1;
	run->riscv_sbi.args[2] = cp->a2;
	run->riscv_sbi.args[3] = cp->a3;
	run->riscv_sbi.args[4] = cp->a4;
	run->riscv_sbi.args[5] = cp->a5;
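	/*
	 * Preload ret[] with the current a0/a1 so a userspace handler
	 * that leaves them untouched hands the registers back unchanged
	 * through kvm_riscv_vcpu_sbi_return().
	 */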
	run->riscv_sbi.ret[0] = cp->a0;
	run->riscv_sbi.ret[1] = cp->a1;
}

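/*
 * Handle a guest-requested shutdown or reset: mark every VCPU powered
 * off, put them to sleep, and report the event (with its reason code)
 * to userspace, which decides what to do with the VM.
 */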
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 reason)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&run->system_event, 0, sizeof(run->system_event));
	run->system_event.type = type;
	run->system_event.ndata = 1;
	run->system_event.data[0] = reason;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

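/*
 * Complete an SBI call previously forwarded by
 * kvm_riscv_vcpu_sbi_forward(). A userspace VMM is expected to handle
 * the exit roughly like this (a minimal sketch, not a complete VMM):
 *
 *	case KVM_EXIT_RISCV_SBI:
 *		// emulate run->riscv_sbi.extension_id / function_id
 *		run->riscv_sbi.ret[0] = error;
 *		run->riscv_sbi.ret[1] = value;
 *		break;
 *
 * On the next KVM_RUN this function copies ret[] back into a0/a1 and
 * steps sepc past the ecall; the guard ensures that happens only once
 * per forwarded call.
 */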
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	/* Handle the SBI return only once */
	if (vcpu->arch.sbi_context.return_handled)
		return 0;
	vcpu->arch.sbi_context.return_handled = 1;

	/* Update the return values */
	cp->a0 = run->riscv_sbi.ret[0];
	cp->a1 = run->riscv_sbi.ret[1];

	/* Move past the 4-byte ecall instruction */
	cp->sepc += 4;

	return 0;
}

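/*
 * Find the extension whose [extid_start, extid_end] range covers @extid;
 * returns NULL when no registered extension matches.
 */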
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i]->extid_start <= extid &&
		    sbi_ext[i]->extid_end >= extid)
			return sbi_ext[i];
	}

	return NULL;
}

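/*
 * Top-level SBI ecall dispatch, invoked from the guest exit path.
 * Returns < 0 to propagate an error to userspace, 0 to exit to
 * userspace for further handling, and 1 to resume the guest.
 */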
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret = 1;
	bool next_sepc = true;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	const struct kvm_vcpu_sbi_extension *ext;
	struct kvm_cpu_trap utrap = {0};
	struct kvm_vcpu_sbi_return sbi_ret = {
		.out_val = 0,
		.err_val = 0,
		.utrap = &utrap,
	};
	bool ext_is_v01 = false;

	ext = kvm_vcpu_sbi_find_ext(cp->a7);
	if (ext && ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
		if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
		    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
			ext_is_v01 = true;
#endif
		ret = ext->handler(vcpu, run, &sbi_ret);
	} else {
		/* Return an error for unsupported SBI calls */
		cp->a0 = SBI_ERR_NOT_SUPPORTED;
		goto ecall_done;
	}

	/*
	 * When the SBI extension returns a Linux error code, exit the
	 * ioctl loop and forward the error to userspace.
	 */
	if (ret < 0) {
		next_sepc = false;
		goto ecall_done;
	}

	/* Handle the special cases: trap, exit, or forward to userspace */
	if (sbi_ret.utrap->scause) {
		/* Redirect the trap to the guest; no need to increment sepc or exit the ioctl loop */
		ret = 1;
		sbi_ret.utrap->sepc = cp->sepc;
		kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
		next_sepc = false;
		goto ecall_done;
	}

	/* Exit the ioctl loop or propagate the error code to the guest */
	if (sbi_ret.uexit) {
		next_sepc = false;
		ret = 0;
	} else {
		cp->a0 = sbi_ret.err_val;
		ret = 1;
	}
ecall_done:
	if (next_sepc)
		cp->sepc += 4;
	/* a1 should only be updated when we continue the ioctl loop */
	if (!ext_is_v01 && ret == 1)
		cp->a1 = sbi_ret.out_val;

	return ret;
}
179