xref: /openbmc/linux/arch/riscv/kvm/vcpu_sbi_v01.c (revision dd21bfa4)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_sbi.h>

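/*
 * Power off all VCPUs, kick them out of guest mode so they observe the
 * sleep request, and report a system event to user space, which performs
 * the actual shutdown.
 */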
static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu,
				    struct kvm_run *run, u32 type)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&run->system_event, 0, sizeof(run->system_event));
	run->system_event.type = type;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

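/*
 * Handle the SBI v0.1 (legacy) calls. The extension ID arrives in a7 and
 * call arguments in a0-a3 of the guest context. Calls that cannot be
 * completed in the kernel either set *exit to bounce the call to user
 * space or record a guest trap in *utrap for the caller to redirect.
 */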
static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				      unsigned long *out_val,
				      struct kvm_cpu_trap *utrap,
				      bool *exit)
{
	ulong hmask;
	int i, ret = 0;
	u64 next_cycle;
	struct kvm_vcpu *rvcpu;
	struct cpumask cm;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	switch (cp->a7) {
	case SBI_EXT_0_1_CONSOLE_GETCHAR:
	case SBI_EXT_0_1_CONSOLE_PUTCHAR:
		/*
		 * The CONSOLE_GETCHAR/CONSOLE_PUTCHAR SBI calls cannot be
		 * handled in the kernel, so we forward them to user space.
		 */
		kvm_riscv_vcpu_sbi_forward(vcpu, run);
		*exit = true;
		break;
	case SBI_EXT_0_1_SET_TIMER:
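		/*
		 * On RV32 the 64-bit timer deadline is split across two
		 * registers: a0 holds the low word and a1 the high word.
		 */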
#if __riscv_xlen == 32
		next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
#else
		next_cycle = (u64)cp->a0;
#endif
		ret = kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
		break;
	case SBI_EXT_0_1_CLEAR_IPI:
		ret = kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_SOFT);
		break;
	case SBI_EXT_0_1_SEND_IPI:
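		/*
		 * a0 is a guest virtual address of the hart mask; a NULL
		 * pointer means the IPI targets all online VCPUs.
		 */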
		if (cp->a0)
			hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
							   utrap);
		else
			hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
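		/*
		 * The unprivileged guest-memory read above may have faulted;
		 * if so, bail out and let the caller redirect the recorded
		 * trap back to the guest.
		 */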
		if (utrap->scause)
			break;

		for_each_set_bit(i, &hmask, BITS_PER_LONG) {
			rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
			ret = kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_VS_SOFT);
			if (ret < 0)
				break;
		}
		break;
	case SBI_EXT_0_1_SHUTDOWN:
		kvm_sbi_system_shutdown(vcpu, run, KVM_SYSTEM_EVENT_SHUTDOWN);
		*exit = true;
		break;
	case SBI_EXT_0_1_REMOTE_FENCE_I:
	case SBI_EXT_0_1_REMOTE_SFENCE_VMA:
	case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID:
		if (cp->a0)
			hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
							   utrap);
		else
			hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
		if (utrap->scause)
			break;

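		/*
		 * Translate the guest hart mask into a mask of host CPUs
		 * on which the target VCPUs are currently loaded; VCPUs
		 * not running anywhere (cpu < 0) are skipped.
		 */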
		cpumask_clear(&cm);
		for_each_set_bit(i, &hmask, BITS_PER_LONG) {
			rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
			if (rvcpu->cpu < 0)
				continue;
			cpumask_set_cpu(rvcpu->cpu, &cm);
		}
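		/*
		 * A guest FENCE.I is forwarded as a remote fence.i, while
		 * guest SFENCE.VMA requests map to HFENCE.VVMA on the host
		 * CPUs running the target VCPUs.
		 */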
		if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I)
			ret = sbi_remote_fence_i(&cm);
		else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA)
			ret = sbi_remote_hfence_vvma(&cm, cp->a1, cp->a2);
		else
			ret = sbi_remote_hfence_vvma_asid(&cm, cp->a1, cp->a2, cp->a3);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

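/*
 * The legacy extension IDs form a contiguous range, 0x00 (SET_TIMER)
 * through 0x08 (SHUTDOWN), so one extension entry covers them all.
 */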
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = SBI_EXT_0_1_SET_TIMER,
	.extid_end = SBI_EXT_0_1_SHUTDOWN,
	.handler = kvm_sbi_ext_v01_handler,
};