1 // SPDX-License-Identifier: GPL-2.0-only
2 #include "kvm_util.h"
3 #include "linux/types.h"
4 #include "linux/bitmap.h"
5 #include "linux/atomic.h"
6
7 #define GUEST_UCALL_FAILED -1
8
/*
 * Shared guest/host pool of ucall structures, one slot per possible vCPU.
 * The in_use bitmap tracks which slots are currently claimed by a vCPU
 * (set/cleared atomically in ucall_alloc()/ucall_free() below).
 */
struct ucall_header {
	DECLARE_BITMAP(in_use, KVM_MAX_VCPUS);
	struct ucall ucalls[KVM_MAX_VCPUS];
};
13
/*
 * Number of guest pages needed to hold the ucall pool, i.e. the pool's
 * size rounded up to a whole number of pages of the given size.
 */
int ucall_nr_pages_required(uint64_t page_size)
{
	uint64_t pool_size = align_up(sizeof(struct ucall_header), page_size);

	return pool_size / page_size;
}
18
/*
 * ucall_pool holds per-VM values (global data is duplicated by each VM), it
 * must not be accessed from host code.  The host-side value is only ever
 * written into the guest's copy via write_guest_global() in ucall_init().
 */
static struct ucall_header *ucall_pool;
24
/*
 * Allocate and zero the per-VM ucall pool in guest memory, record each
 * slot's host virtual address inside the slot itself, publish the pool's
 * guest virtual address to the guest, and perform arch-specific setup.
 */
void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
{
	struct ucall_header *hdr;
	vm_vaddr_t gva;
	int i;

	gva = __vm_vaddr_alloc(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR, MEM_REGION_DATA);
	hdr = (struct ucall_header *)addr_gva2hva(vm, gva);
	memset(hdr, 0, sizeof(*hdr));

	/*
	 * Stash each entry's own host address so that the guest can hand the
	 * host a pointer that is directly usable in the host address space.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; i++)
		hdr->ucalls[i].hva = &hdr->ucalls[i];

	/* The guest sees the pool at its guest virtual address. */
	write_guest_global(vm, ucall_pool, (struct ucall_header *)gva);

	ucall_arch_init(vm, mmio_gpa);
}
45
ucall_alloc(void)46 static struct ucall *ucall_alloc(void)
47 {
48 struct ucall *uc;
49 int i;
50
51 if (!ucall_pool)
52 goto ucall_failed;
53
54 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
55 if (!test_and_set_bit(i, ucall_pool->in_use)) {
56 uc = &ucall_pool->ucalls[i];
57 memset(uc->args, 0, sizeof(uc->args));
58 return uc;
59 }
60 }
61
62 ucall_failed:
63 /*
64 * If the vCPU cannot grab a ucall structure, make a bare ucall with a
65 * magic value to signal to get_ucall() that things went sideways.
66 * GUEST_ASSERT() depends on ucall_alloc() and so cannot be used here.
67 */
68 ucall_arch_do_ucall(GUEST_UCALL_FAILED);
69 return NULL;
70 }
71
ucall_free(struct ucall * uc)72 static void ucall_free(struct ucall *uc)
73 {
74 /* Beware, here be pointer arithmetic. */
75 clear_bit(uc - ucall_pool->ucalls, ucall_pool->in_use);
76 }
77
/*
 * Report a guest-side assertion failure to the host: record the failed
 * expression, file, and line, format the user message into the ucall
 * buffer, and exit to the host.  Guest context only.
 */
void ucall_assert(uint64_t cmd, const char *exp, const char *file,
		  unsigned int line, const char *fmt, ...)
{
	va_list ap;
	struct ucall *uc = ucall_alloc();

	uc->cmd = cmd;

	WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (uint64_t)(exp));
	WRITE_ONCE(uc->args[GUEST_FILE], (uint64_t)(file));
	WRITE_ONCE(uc->args[GUEST_LINE], line);

	va_start(ap, fmt);
	guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, ap);
	va_end(ap);

	/* Hand the host the host-visible address of this entry. */
	ucall_arch_do_ucall((vm_vaddr_t)uc->hva);

	ucall_free(uc);
}
99
/*
 * Send a printf-style formatted message to the host via a ucall.
 * Guest context only.
 */
void ucall_fmt(uint64_t cmd, const char *fmt, ...)
{
	va_list ap;
	struct ucall *uc = ucall_alloc();

	uc->cmd = cmd;

	va_start(ap, fmt);
	guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, ap);
	va_end(ap);

	/* Hand the host the host-visible address of this entry. */
	ucall_arch_do_ucall((vm_vaddr_t)uc->hva);

	ucall_free(uc);
}
116
/*
 * Send a command plus up to UCALL_MAX_ARGS raw uint64_t arguments to the
 * host.  Extra arguments beyond the limit are silently dropped.  Guest
 * context only.
 */
void ucall(uint64_t cmd, int nargs, ...)
{
	struct ucall *uc = ucall_alloc();
	va_list ap;
	int i;

	WRITE_ONCE(uc->cmd, cmd);

	/* Clamp to the number of slots actually available in the entry. */
	if (nargs > UCALL_MAX_ARGS)
		nargs = UCALL_MAX_ARGS;

	va_start(ap, nargs);
	for (i = 0; i < nargs; i++)
		WRITE_ONCE(uc->args[i], va_arg(ap, uint64_t));
	va_end(ap);

	/* Hand the host the host-visible address of this entry. */
	ucall_arch_do_ucall((vm_vaddr_t)uc->hva);

	ucall_free(uc);
}
138
get_ucall(struct kvm_vcpu * vcpu,struct ucall * uc)139 uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
140 {
141 struct ucall ucall;
142 void *addr;
143
144 if (!uc)
145 uc = &ucall;
146
147 addr = ucall_arch_get_ucall(vcpu);
148 if (addr) {
149 TEST_ASSERT(addr != (void *)GUEST_UCALL_FAILED,
150 "Guest failed to allocate ucall struct");
151
152 memcpy(uc, addr, sizeof(*uc));
153 vcpu_run_complete_io(vcpu);
154 } else {
155 memset(uc, 0, sizeof(*uc));
156 }
157
158 return uc->cmd;
159 }
160