xref: /openbmc/linux/arch/riscv/kvm/vcpu_sbi.c (revision fe38b4d6)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>

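/* Version of the legacy SBI specification (v0.1) emulated by this file. */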
#define SBI_VERSION_MAJOR			0
#define SBI_VERSION_MINOR			1

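/*
 * Forward an SBI call to user space via a KVM_EXIT_RISCV_SBI exit.
 * The ecall arguments (a0-a5), extension ID (a7) and function ID (a6)
 * are copied into the kvm_run structure; ret[] is preloaded with the
 * current a0/a1 so the guest sees unchanged values if user space does
 * not fill in a result.
 */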
static void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu,
				       struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	vcpu->arch.sbi_context.return_handled = 0;
	vcpu->stat.ecall_exit_stat++;
	run->exit_reason = KVM_EXIT_RISCV_SBI;
	run->riscv_sbi.extension_id = cp->a7;
	run->riscv_sbi.function_id = cp->a6;
	run->riscv_sbi.args[0] = cp->a0;
	run->riscv_sbi.args[1] = cp->a1;
	run->riscv_sbi.args[2] = cp->a2;
	run->riscv_sbi.args[3] = cp->a3;
	run->riscv_sbi.args[4] = cp->a4;
	run->riscv_sbi.args[5] = cp->a5;
	run->riscv_sbi.ret[0] = cp->a0;
	run->riscv_sbi.ret[1] = cp->a1;
}

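/*
 * Complete an SBI call that was forwarded to user space: copy the
 * results from kvm_run back into a0/a1 and step over the 4-byte ecall
 * instruction. The return_handled flag ensures the return values are
 * applied only once per forwarded call.
 */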
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	/* Handle SBI return only once */
	if (vcpu->arch.sbi_context.return_handled)
		return 0;
	vcpu->arch.sbi_context.return_handled = 1;

	/* Update return values */
	cp->a0 = run->riscv_sbi.ret[0];
	cp->a1 = run->riscv_sbi.ret[1];

	/* Move to next instruction */
	cp->sepc += 4;

	return 0;
}

#ifdef CONFIG_RISCV_SBI_V01

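/*
 * Power off every VCPU of the VM, park them with KVM_REQ_SLEEP, and
 * report a system event of the given type to user space.
 */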
static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu,
				    struct kvm_run *run, u32 type)
{
	int i;
	struct kvm_vcpu *tmp;

	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&run->system_event, 0, sizeof(run->system_event));
	run->system_event.type = type;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

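/*
 * Handle an SBI v0.1 ecall from the guest. A return value of 1 resumes
 * the guest; 0 exits to user space so it can complete the request
 * (console I/O and shutdown cannot be finished in the kernel).
 */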
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	ulong hmask;
	int i, ret = 1;
	u64 next_cycle;
	struct kvm_vcpu *rvcpu;
	bool next_sepc = true;
	struct cpumask cm, hm;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	if (!cp)
		return -EINVAL;

	switch (cp->a7) {
	case SBI_EXT_0_1_CONSOLE_GETCHAR:
	case SBI_EXT_0_1_CONSOLE_PUTCHAR:
		/*
		 * The CONSOLE_GETCHAR/CONSOLE_PUTCHAR SBI calls cannot be
		 * handled in the kernel, so forward them to user space.
		 */
		kvm_riscv_vcpu_sbi_forward(vcpu, run);
		next_sepc = false;
		ret = 0;
		break;
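	/*
	 * On RV32 the 64-bit timer value is passed in the a1:a0 register
	 * pair; on RV64 it fits entirely in a0.
	 */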
	case SBI_EXT_0_1_SET_TIMER:
#if __riscv_xlen == 32
		next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
#else
		next_cycle = (u64)cp->a0;
#endif
		kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
		break;
	case SBI_EXT_0_1_CLEAR_IPI:
		kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_SOFT);
		break;
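	/*
	 * For SEND_IPI, a0 holds the guest address of a hart mask; the
	 * unprivileged read of that mask may fault, in which case the
	 * trap is redirected back to the guest. A zero address selects
	 * all online VCPUs.
	 */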
	case SBI_EXT_0_1_SEND_IPI:
		if (cp->a0)
			hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
							   &utrap);
		else
			hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
		if (utrap.scause) {
			utrap.sepc = cp->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			next_sepc = false;
			break;
		}
		for_each_set_bit(i, &hmask, BITS_PER_LONG) {
			rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
			kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_VS_SOFT);
		}
		break;
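	/* SHUTDOWN powers off all VCPUs and hands the event to user space. */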
	case SBI_EXT_0_1_SHUTDOWN:
		kvm_sbi_system_shutdown(vcpu, run, KVM_SYSTEM_EVENT_SHUTDOWN);
		next_sepc = false;
		ret = 0;
		break;
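	/*
	 * For the remote fences, translate the guest hart mask into a
	 * mask of host CPUs currently running the target VCPUs, then
	 * issue the matching hypervisor fence: a guest SFENCE.VMA
	 * becomes an HFENCE.VVMA because guest addresses are VS-stage
	 * virtual addresses.
	 */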
	case SBI_EXT_0_1_REMOTE_FENCE_I:
	case SBI_EXT_0_1_REMOTE_SFENCE_VMA:
	case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID:
		if (cp->a0)
			hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
							   &utrap);
		else
			hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
		if (utrap.scause) {
			utrap.sepc = cp->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			next_sepc = false;
			break;
		}
		cpumask_clear(&cm);
		for_each_set_bit(i, &hmask, BITS_PER_LONG) {
			rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
			if (rvcpu->cpu < 0)
				continue;
			cpumask_set_cpu(rvcpu->cpu, &cm);
		}
		riscv_cpuid_to_hartid_mask(&cm, &hm);
		if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I)
			sbi_remote_fence_i(cpumask_bits(&hm));
		else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA)
			sbi_remote_hfence_vvma(cpumask_bits(&hm),
						cp->a1, cp->a2);
		else
			sbi_remote_hfence_vvma_asid(cpumask_bits(&hm),
						cp->a1, cp->a2, cp->a3);
		break;
	default:
		/* Return error for unsupported SBI calls */
		cp->a0 = SBI_ERR_NOT_SUPPORTED;
		break;
	}

	if (next_sepc)
		cp->sepc += 4;

	return ret;
}

#else

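/*
 * Without CONFIG_RISCV_SBI_V01, no SBI calls are emulated in the kernel;
 * everything is forwarded to user space.
 */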
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_riscv_vcpu_sbi_forward(vcpu, run);
	return 0;
}

#endif