/*
 * vhost-backend
 *
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "qemu/error-report.h"

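/*
 * Kernel vhost backend: dev->opaque holds the file descriptor of the vhost
 * character device (e.g. /dev/vhost-net or /dev/vhost-scsi), and every
 * operation below boils down to an ioctl() on that descriptor.
 *
 * Illustrative setup (sketch only, not part of this file; the exact
 * vhost_dev_init() signature varies between QEMU versions):
 *
 *     int vhostfd = open("/dev/vhost-net", O_RDWR);
 *     vhost_dev_init(&hdev, (void *)(uintptr_t)vhostfd,
 *                    VHOST_BACKEND_TYPE_KERNEL, 0);
 */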
static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
                             void *arg)
{
    int fd = (uintptr_t) dev->opaque;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    return ioctl(fd, request, arg);
}

static int vhost_kernel_init(struct vhost_dev *dev, void *opaque)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    dev->opaque = opaque;

    return 0;
}

static int vhost_kernel_cleanup(struct vhost_dev *dev)
{
    int fd = (uintptr_t) dev->opaque;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    return close(fd);
}

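/*
 * The vhost kernel module caps the number of memory regions accepted by
 * VHOST_SET_MEM_TABLE.  Read the cap from the max_mem_regions module
 * parameter and fall back to the historical default of 64 when the sysfs
 * file is absent or unreadable.
 */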
static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
{
    int limit = 64;
    char *s;

    if (g_file_get_contents("/sys/module/vhost/parameters/max_mem_regions",
                            &s, NULL, NULL)) {
        uint64_t val = g_ascii_strtoull(s, NULL, 10);
        if (!((val == G_MAXUINT64 || !val) && errno)) {
            g_free(s);
            return val;
        }
        error_report("ignoring invalid max_mem_regions value in vhost module:"
                     " %s", s);
        g_free(s);
    }
    return limit;
}

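/*
 * The handlers below are thin wrappers: each one forwards its argument
 * unchanged to the matching VHOST_* ioctl through vhost_kernel_call().
 */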
static int vhost_kernel_net_set_backend(struct vhost_dev *dev,
                                        struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_NET_SET_BACKEND, file);
}

static int vhost_kernel_scsi_set_endpoint(struct vhost_dev *dev,
                                          struct vhost_scsi_target *target)
{
    return vhost_kernel_call(dev, VHOST_SCSI_SET_ENDPOINT, target);
}

static int vhost_kernel_scsi_clear_endpoint(struct vhost_dev *dev,
                                            struct vhost_scsi_target *target)
{
    return vhost_kernel_call(dev, VHOST_SCSI_CLEAR_ENDPOINT, target);
}

static int vhost_kernel_scsi_get_abi_version(struct vhost_dev *dev, int *version)
{
    return vhost_kernel_call(dev, VHOST_SCSI_GET_ABI_VERSION, version);
}

static int vhost_kernel_set_log_base(struct vhost_dev *dev, uint64_t base,
                                     struct vhost_log *log)
{
    return vhost_kernel_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_kernel_set_mem_table(struct vhost_dev *dev,
                                      struct vhost_memory *mem)
{
    return vhost_kernel_call(dev, VHOST_SET_MEM_TABLE, mem);
}

static int vhost_kernel_set_vring_addr(struct vhost_dev *dev,
                                       struct vhost_vring_addr *addr)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_kernel_set_vring_endian(struct vhost_dev *dev,
                                         struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_ENDIAN, ring);
}

static int vhost_kernel_set_vring_num(struct vhost_dev *dev,
                                      struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_kernel_set_vring_base(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_kernel_get_vring_base(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_GET_VRING_BASE, ring);
}

static int vhost_kernel_set_vring_kick(struct vhost_dev *dev,
                                       struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_kernel_set_vring_call(struct vhost_dev *dev,
                                       struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *dev,
                                                   struct vhost_vring_state *s)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, s);
}

static int vhost_kernel_set_features(struct vhost_dev *dev,
                                     uint64_t features)
{
    return vhost_kernel_call(dev, VHOST_SET_FEATURES, &features);
}

static int vhost_kernel_get_features(struct vhost_dev *dev,
                                     uint64_t *features)
{
    return vhost_kernel_call(dev, VHOST_GET_FEATURES, features);
}

static int vhost_kernel_set_owner(struct vhost_dev *dev)
{
    return vhost_kernel_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_kernel_reset_device(struct vhost_dev *dev)
{
    return vhost_kernel_call(dev, VHOST_RESET_OWNER, NULL);
}

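/*
 * vhost numbers virtqueues relative to the device, while callers pass the
 * absolute index; e.g. with vq_index == 2 and nvqs == 2, absolute queue 3
 * maps to kernel queue 1.
 */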
static int vhost_kernel_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    return idx - dev->vq_index;
}

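/* vhost-vsock only: program the guest CID and start/stop the backend. */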
#ifdef CONFIG_VHOST_VSOCK
static int vhost_kernel_vsock_set_guest_cid(struct vhost_dev *dev,
                                            uint64_t guest_cid)
{
    return vhost_kernel_call(dev, VHOST_VSOCK_SET_GUEST_CID, &guest_cid);
}

static int vhost_kernel_vsock_set_running(struct vhost_dev *dev, int start)
{
    return vhost_kernel_call(dev, VHOST_VSOCK_SET_RUNNING, &start);
}
#endif /* CONFIG_VHOST_VSOCK */

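/*
 * Device IOTLB support: when the kernel needs a translation it makes a
 * struct vhost_msg of type VHOST_IOTLB_MISS readable on the vhost fd.
 * This handler drains those messages and lets the vhost core resolve the
 * miss and push the mapping back.
 */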
static void vhost_kernel_iotlb_read(void *opaque)
{
    struct vhost_dev *dev = opaque;
    struct vhost_msg msg;
    ssize_t len;

    while ((len = read((uintptr_t)dev->opaque, &msg, sizeof msg)) > 0) {
        struct vhost_iotlb_msg *imsg = &msg.iotlb;
        if (len < sizeof msg) {
            error_report("Wrong vhost message len: %d", (int)len);
            break;
        }
        if (msg.type != VHOST_IOTLB_MSG) {
            error_report("Unknown vhost iotlb message type");
            break;
        }
        switch (imsg->type) {
        case VHOST_IOTLB_MISS:
            vhost_device_iotlb_miss(dev, imsg->iova,
                                    imsg->perm != VHOST_ACCESS_RO);
            break;
        case VHOST_IOTLB_UPDATE:
        case VHOST_IOTLB_INVALIDATE:
            error_report("Unexpected IOTLB message type");
            break;
        case VHOST_IOTLB_ACCESS_FAIL:
            /* FIXME: report device iotlb error */
            break;
        default:
            break;
        }
    }
}

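/*
 * Push a translation into the kernel's device IOTLB by writing a
 * VHOST_IOTLB_UPDATE message to the vhost fd, mapping the IOMMU access
 * flags onto the corresponding VHOST_ACCESS_* values.
 */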
static int vhost_kernel_update_device_iotlb(struct vhost_dev *dev,
                                            uint64_t iova, uint64_t uaddr,
                                            uint64_t len,
                                            IOMMUAccessFlags perm)
{
    struct vhost_msg msg;
    msg.type = VHOST_IOTLB_MSG;
    msg.iotlb.iova = iova;
    msg.iotlb.uaddr = uaddr;
    msg.iotlb.size = len;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    switch (perm) {
    case IOMMU_RO:
        msg.iotlb.perm = VHOST_ACCESS_RO;
        break;
    case IOMMU_WO:
        msg.iotlb.perm = VHOST_ACCESS_WO;
        break;
    case IOMMU_RW:
        msg.iotlb.perm = VHOST_ACCESS_RW;
        break;
    default:
        g_assert_not_reached();
    }

    if (write((uintptr_t)dev->opaque, &msg, sizeof msg) != sizeof msg) {
        error_report("Failed to update device iotlb");
        return -EFAULT;
    }

    return 0;
}

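/* Drop a range from the kernel's device IOTLB via VHOST_IOTLB_INVALIDATE. */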
static int vhost_kernel_invalidate_device_iotlb(struct vhost_dev *dev,
                                                uint64_t iova, uint64_t len)
{
    struct vhost_msg msg;

    msg.type = VHOST_IOTLB_MSG;
    msg.iotlb.iova = iova;
    msg.iotlb.size = len;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    if (write((uintptr_t)dev->opaque, &msg, sizeof msg) != sizeof msg) {
        error_report("Failed to invalidate device iotlb");
        return -EFAULT;
    }

    return 0;
}

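/*
 * Register (or remove) the fd handler that services IOTLB miss requests
 * arriving on the vhost fd.
 */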
static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
                                            int enabled)
{
    if (enabled) {
        qemu_set_fd_handler((uintptr_t)dev->opaque,
                            vhost_kernel_iotlb_read, NULL, dev);
    } else {
        qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
    }
}

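/*
 * Dispatch table for the kernel backend.  Optional callbacks that are not
 * listed stay NULL; the generic vhost code checks for NULL before using
 * them.
 */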
static const VhostOps kernel_ops = {
        .backend_type = VHOST_BACKEND_TYPE_KERNEL,
        .vhost_backend_init = vhost_kernel_init,
        .vhost_backend_cleanup = vhost_kernel_cleanup,
        .vhost_backend_memslots_limit = vhost_kernel_memslots_limit,
        .vhost_net_set_backend = vhost_kernel_net_set_backend,
        .vhost_scsi_set_endpoint = vhost_kernel_scsi_set_endpoint,
        .vhost_scsi_clear_endpoint = vhost_kernel_scsi_clear_endpoint,
        .vhost_scsi_get_abi_version = vhost_kernel_scsi_get_abi_version,
        .vhost_set_log_base = vhost_kernel_set_log_base,
        .vhost_set_mem_table = vhost_kernel_set_mem_table,
        .vhost_set_vring_addr = vhost_kernel_set_vring_addr,
        .vhost_set_vring_endian = vhost_kernel_set_vring_endian,
        .vhost_set_vring_num = vhost_kernel_set_vring_num,
        .vhost_set_vring_base = vhost_kernel_set_vring_base,
        .vhost_get_vring_base = vhost_kernel_get_vring_base,
        .vhost_set_vring_kick = vhost_kernel_set_vring_kick,
        .vhost_set_vring_call = vhost_kernel_set_vring_call,
        .vhost_set_vring_busyloop_timeout =
                                vhost_kernel_set_vring_busyloop_timeout,
        .vhost_set_features = vhost_kernel_set_features,
        .vhost_get_features = vhost_kernel_get_features,
        .vhost_set_owner = vhost_kernel_set_owner,
        .vhost_reset_device = vhost_kernel_reset_device,
        .vhost_get_vq_index = vhost_kernel_get_vq_index,
#ifdef CONFIG_VHOST_VSOCK
        .vhost_vsock_set_guest_cid = vhost_kernel_vsock_set_guest_cid,
        .vhost_vsock_set_running = vhost_kernel_vsock_set_running,
#endif /* CONFIG_VHOST_VSOCK */
        .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
        .vhost_update_device_iotlb = vhost_kernel_update_device_iotlb,
        .vhost_invalidate_device_iotlb = vhost_kernel_invalidate_device_iotlb,
};

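/*
 * Pick the ops table for the requested backend type; called during device
 * setup (e.g. from vhost_dev_init()).  Illustrative use (sketch only):
 *
 *     if (vhost_set_backend_type(&hdev, VHOST_BACKEND_TYPE_KERNEL) < 0) {
 *         return -1;
 *     }
 *     ... hdev.vhost_ops now points at the kernel_ops table above ...
 */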
int vhost_set_backend_type(struct vhost_dev *dev, VhostBackendType backend_type)
{
    int r = 0;

    switch (backend_type) {
    case VHOST_BACKEND_TYPE_KERNEL:
        dev->vhost_ops = &kernel_ops;
        break;
    case VHOST_BACKEND_TYPE_USER:
        dev->vhost_ops = &user_ops;
        break;
    default:
        error_report("Unknown vhost backend type");
        r = -1;
    }

    return r;
}