/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/drm.h"
#include "qapi/error.h"
#include "qemu/sockets.h"

#include <pixman.h>
#include <glib-unix.h>

#include "vugpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "virgl.h"
#include "vugbm.h"

enum {
    VHOST_USER_GPU_MAX_QUEUES = 2,
};

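/*
 * A guest resource in the non-virgl (2D) path: a pixman image backed by a
 * vugbm buffer, plus the guest-memory iovecs attached as backing store.
 * scanout_bitmask records which scanouts currently display this resource.
 */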
struct virtio_gpu_simple_resource {
    uint32_t resource_id;
    uint32_t width;
    uint32_t height;
    uint32_t format;
    struct iovec *iov;
    unsigned int iov_cnt;
    uint32_t scanout_bitmask;
    pixman_image_t *image;
    struct vugbm_buffer buffer;
    QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};

static gboolean opt_print_caps;
static int opt_fdnum = -1;
static char *opt_socket_path;
static char *opt_render_node;
static gboolean opt_virgl;

static void vg_handle_ctrl(VuDev *dev, int qidx);
static void vg_cleanup_mapping(VuGpu *g,
                               struct virtio_gpu_simple_resource *res);

static const char *
vg_cmd_to_string(int cmd)
{
#define CMD(cmd) [cmd] = #cmd
    static const char *vg_cmd_str[] = {
        CMD(VIRTIO_GPU_UNDEFINED),

        /* 2d commands */
        CMD(VIRTIO_GPU_CMD_GET_DISPLAY_INFO),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_UNREF),
        CMD(VIRTIO_GPU_CMD_SET_SCANOUT),
        CMD(VIRTIO_GPU_CMD_RESOURCE_FLUSH),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING),
        CMD(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET_INFO),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET),

        /* 3d commands */
        CMD(VIRTIO_GPU_CMD_CTX_CREATE),
        CMD(VIRTIO_GPU_CMD_CTX_DESTROY),
        CMD(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D),
        CMD(VIRTIO_GPU_CMD_SUBMIT_3D),

        /* cursor commands */
        CMD(VIRTIO_GPU_CMD_UPDATE_CURSOR),
        CMD(VIRTIO_GPU_CMD_MOVE_CURSOR),
    };
#undef CMD

    if (cmd >= 0 && cmd < G_N_ELEMENTS(vg_cmd_str)) {
        return vg_cmd_str[cmd];
    } else {
        return "unknown";
    }
}

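/*
 * Blocking read from the vhost-user-gpu channel socket, retrying on
 * EINTR/EAGAIN.  A short read only triggers a warning; the caller is
 * expected to compare the return value against the requested length.
 */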
static int
vg_sock_fd_read(int sock, void *buf, ssize_t buflen)
{
    int ret;

    do {
        ret = read(sock, buf, buflen);
    } while (ret < 0 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

static void
vg_sock_fd_close(VuGpu *g)
{
    if (g->sock_fd >= 0) {
        close(g->sock_fd);
        g->sock_fd = -1;
    }
}

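/*
 * Some requests (e.g. VHOST_USER_GPU_DMABUF_UPDATE) expect an empty reply
 * from the frontend before processing may continue.  vg_wait_ok() registers
 * a watch on the channel socket; once source_wait_cb() has consumed the
 * acknowledgement, it clears wait_in and re-runs the control queue.
 */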
static gboolean
source_wait_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    VuGpu *g = user_data;

    if (!vg_recv_msg(g, VHOST_USER_GPU_DMABUF_UPDATE, 0, NULL)) {
        return G_SOURCE_CONTINUE;
    }

    /* resume */
    g->wait_in = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_wait_ok(VuGpu *g)
{
    assert(g->wait_in == 0);
    g->wait_in = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               source_wait_cb, g);
}

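/*
 * Write a message to the channel socket, optionally passing a file
 * descriptor (e.g. a dmabuf fd) as SCM_RIGHTS ancillary data.  The cmsgu
 * union guarantees the control buffer is suitably aligned for cmsghdr.
 */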
static int
vg_sock_fd_write(int sock, const void *buf, ssize_t buflen, int fd)
{
    ssize_t ret;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = buflen,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
    };
    union {
        struct cmsghdr cmsghdr;
        char control[CMSG_SPACE(sizeof(int))];
    } cmsgu;
    struct cmsghdr *cmsg;

    if (fd != -1) {
        msg.msg_control = cmsgu.control;
        msg.msg_controllen = sizeof(cmsgu.control);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;

        *((int *)CMSG_DATA(cmsg)) = fd;
    }

    do {
        ret = sendmsg(sock, &msg, 0);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

void
vg_send_msg(VuGpu *vg, const VhostUserGpuMsg *msg, int fd)
{
    if (vg_sock_fd_write(vg->sock_fd, msg,
                         VHOST_USER_GPU_HDR_SIZE + msg->size, fd) < 0) {
        vg_sock_fd_close(vg);
    }
}

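/*
 * Receive a reply from the frontend.  Every message starts with a fixed
 * header of three u32 fields, read one by one below:
 *
 *     u32 request | u32 flags | u32 size | (size bytes of payload)
 *
 * The reply must match the expected request and payload size and carry
 * VHOST_USER_GPU_MSG_FLAG_REPLY; any read failure closes the channel.
 */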
bool
vg_recv_msg(VuGpu *g, uint32_t expect_req, uint32_t expect_size,
            gpointer payload)
{
    uint32_t req, flags, size;

    if (vg_sock_fd_read(g->sock_fd, &req, sizeof(req)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &flags, sizeof(flags)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &size, sizeof(size)) < 0) {
        goto err;
    }

    g_return_val_if_fail(req == expect_req, false);
    g_return_val_if_fail(flags & VHOST_USER_GPU_MSG_FLAG_REPLY, false);
    g_return_val_if_fail(size == expect_size, false);

    if (size && vg_sock_fd_read(g->sock_fd, payload, size) != size) {
        goto err;
    }

    return true;

err:
    vg_sock_fd_close(g);
    return false;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VuGpu *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

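/*
 * Complete a control command: if the guest requested a fence, propagate
 * fence_id/ctx_id into the response header, then copy the (byteswapped)
 * response into the command's in-sg and notify the queue.
 */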
void
vg_ctrl_response(VuGpu *g,
                 struct virtio_gpu_ctrl_command *cmd,
                 struct virtio_gpu_ctrl_hdr *resp,
                 size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        g_critical("%s: response size incorrect %zu vs %zu",
                   __func__, s, resp_len);
    }
    vu_queue_push(&g->dev.parent, cmd->vq, &cmd->elem, s);
    vu_queue_notify(&g->dev.parent, cmd->vq);
    cmd->state = VG_CMD_STATE_FINISHED;
}

void
vg_ctrl_response_nodata(VuGpu *g,
                        struct virtio_gpu_ctrl_command *cmd,
                        enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp = {
        .type = type,
    };

    vg_ctrl_response(g, cmd, &resp, sizeof(resp));
}


static gboolean
get_display_info_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    struct virtio_gpu_resp_display_info dpy_info = { {} };
    VuGpu *vg = user_data;
    struct virtio_gpu_ctrl_command *cmd = QTAILQ_LAST(&vg->fenceq);

    g_debug("disp info cb");
    assert(cmd->cmd_hdr.type == VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
    if (!vg_recv_msg(vg, VHOST_USER_GPU_GET_DISPLAY_INFO,
                     sizeof(dpy_info), &dpy_info)) {
        return G_SOURCE_CONTINUE;
    }

    QTAILQ_REMOVE(&vg->fenceq, cmd, next);
    vg_ctrl_response(vg, cmd, &dpy_info.hdr, sizeof(dpy_info));

    vg->wait_in = 0;
    vg_handle_ctrl(&vg->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

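/*
 * GET_DISPLAY_INFO and GET_EDID are forwarded to the frontend and answered
 * asynchronously: the command is marked VG_CMD_STATE_PENDING (keeping it on
 * fenceq), a watch is placed on the channel socket, and the matching
 * callback completes the guest command once the frontend's reply arrives.
 */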
void
vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_DISPLAY_INFO,
        .size = 0,
    };

    assert(vg->wait_in == 0);

    vg_send_msg(vg, &msg, -1);
    vg->wait_in = g_unix_fd_add(vg->sock_fd, G_IO_IN | G_IO_HUP,
                                get_display_info_cb, vg);
    cmd->state = VG_CMD_STATE_PENDING;
}

static gboolean
get_edid_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    struct virtio_gpu_resp_edid resp_edid;
    VuGpu *vg = user_data;
    struct virtio_gpu_ctrl_command *cmd = QTAILQ_LAST(&vg->fenceq);

    g_debug("get edid cb");
    assert(cmd->cmd_hdr.type == VIRTIO_GPU_CMD_GET_EDID);
    if (!vg_recv_msg(vg, VHOST_USER_GPU_GET_EDID,
                     sizeof(resp_edid), &resp_edid)) {
        return G_SOURCE_CONTINUE;
    }

    QTAILQ_REMOVE(&vg->fenceq, cmd, next);
    vg_ctrl_response(vg, cmd, &resp_edid.hdr, sizeof(resp_edid));

    vg->wait_in = 0;
    vg_handle_ctrl(&vg->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_get_edid(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_get_edid get_edid;

    VUGPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_EDID,
        .size = sizeof(VhostUserGpuEdidRequest),
        .payload.edid_req = {
            .scanout_id = get_edid.scanout,
        },
    };

    assert(vg->wait_in == 0);

    vg_send_msg(vg, &msg, -1);
    vg->wait_in = g_unix_fd_add(vg->sock_fd, G_IO_IN | G_IO_HUP,
                                get_edid_cb, vg);
    cmd->state = VG_CMD_STATE_PENDING;
}

static void
vg_resource_create_2d(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VUGPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));

    if (c2d.resource_id == 0) {
        g_critical("%s: resource id 0 is not allowed", __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        g_critical("%s: resource already exists %d", __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        g_critical("%s: host couldn't handle guest format %d",
                   __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    vugbm_buffer_create(&res->buffer, &g->gdev, c2d.width, c2d.height);
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          (uint32_t *)res->buffer.mmap,
                                          res->buffer.stride);
    if (!res->image) {
        g_critical("%s: resource creation failed %d %d %d",
                   __func__, c2d.resource_id, c2d.width, c2d.height);
        vugbm_buffer_destroy(&res->buffer);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void
vg_disable_scanout(VuGpu *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    scanout->width = 0;
    scanout->height = 0;

    if (g->sock_fd >= 0) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout.scanout_id = scanout_id,
        };
        vg_send_msg(g, &msg, -1);
    }
}

static void
vg_resource_destroy(VuGpu *g,
                    struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                vg_disable_scanout(g, i);
            }
        }
    }

    vugbm_buffer_destroy(&res->buffer);
    vg_cleanup_mapping(g, res);
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void
vg_resource_unref(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VUGPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    vg_resource_destroy(g, res);
}

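/*
 * Translate the guest's attach-backing entries (guest physical address and
 * length pairs that follow the request header) into host iovecs via
 * vu_gpa_to_va().  nr_entries is capped at 16384 to bound the allocation,
 * and a partial translation (len shrunk by vu_gpa_to_va) fails the call.
 */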
int
vg_create_mapping_iov(VuGpu *g,
                      struct virtio_gpu_resource_attach_backing *ab,
                      struct virtio_gpu_ctrl_command *cmd,
                      struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        g_critical("%s: nr_entries is too big (%d > 16384)",
                   __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        g_critical("%s: command data size incorrect %zu vs %zu",
                   __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_new0(struct iovec, ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = vu_gpa_to_va(&g->dev.parent, &len, ents[i].addr);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            g_critical("%s: resource %d element %d",
                       __func__, ab->resource_id, i);
            g_free(*iov);
            g_free(ents);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

static void
vg_resource_attach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VUGPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = vg_create_mapping_iov(g, &ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

/* Currently this only frees the iov; it may grow more cleanup work later. */
void vg_cleanup_mapping_iov(VuGpu *g,
                            struct iovec *iov, uint32_t count)
{
    g_free(iov);
}

static void
vg_cleanup_mapping(VuGpu *g,
                   struct virtio_gpu_simple_resource *res)
{
    vg_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
}

static void
vg_resource_detach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VUGPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    vg_cleanup_mapping(g, res);
}

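/*
 * Copy guest backing pages into the host pixman image.  The fast path
 * streams the whole image in a single iov_to_buf() when the transfer
 * covers full rows from offset 0; otherwise each row of the rectangle is
 * copied separately, using the guest-provided offset as source position.
 */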
static void
vg_transfer_to_host_2d(VuGpu *g,
                       struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VUGPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        g_critical("%s: transfer bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d",
                   __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                   t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       img_data + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

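/*
 * Associate a resource with a scanout.  When the backing vugbm buffer can
 * export a dmabuf fd, the frontend is sent VHOST_USER_GPU_DMABUF_SCANOUT
 * with the fd attached (no pixel copies); otherwise the plain
 * VHOST_USER_GPU_SCANOUT message announces only the new size, and pixels
 * travel later via VHOST_USER_GPU_UPDATE in vg_resource_flush().
 */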
static void
vg_set_scanout(VuGpu *g,
               struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_set_scanout ss;
    int fd;

    VUGPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        vg_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        g_critical("%s: illegal scanout %d bounds for"
                   " resource %d, (%d,%d)+%d,%d vs %d %d",
                   __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                   ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;

    struct vugbm_buffer *buffer = &res->buffer;

    if (vugbm_buffer_can_get_dmabuf_fd(buffer)) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout = (VhostUserGpuDMABUFScanout) {
                .scanout_id = ss.scanout_id,
                .x = ss.r.x,
                .y = ss.r.y,
                .width = ss.r.width,
                .height = ss.r.height,
                .fd_width = buffer->width,
                .fd_height = buffer->height,
                .fd_stride = buffer->stride,
                .fd_drm_fourcc = buffer->format
            }
        };

        if (vugbm_buffer_get_dmabuf_fd(buffer, &fd)) {
            vg_send_msg(g, &msg, fd);
            close(fd);
        }
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout = (VhostUserGpuScanout) {
                .scanout_id = ss.scanout_id,
                .width = scanout->width,
                .height = scanout->height
            }
        };
        vg_send_msg(g, &msg, -1);
    }
}

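/*
 * Flush a dirty rectangle to every scanout showing the resource.  The
 * flush rectangle is intersected with each scanout's region; dmabuf-backed
 * scanouts just get a DMABUF_UPDATE notification (and we wait for the ack),
 * while the fallback path composites the damaged pixels into a
 * VHOST_USER_GPU_UPDATE message whose payload carries the data inline.
 */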
static void
vg_resource_flush(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VUGPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d\n",
                   __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        g_critical("%s: flush bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d\n",
                   __func__, rf.resource_id, rf.r.x, rf.r.y,
                   rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);

        extents = pixman_region_extents(&finalregion);
        size_t width = extents->x2 - extents->x1;
        size_t height = extents->y2 - extents->y1;

        if (vugbm_buffer_can_get_dmabuf_fd(&res->buffer)) {
            VhostUserGpuMsg vmsg = {
                .request = VHOST_USER_GPU_DMABUF_UPDATE,
                .size = sizeof(VhostUserGpuUpdate),
                .payload.update = (VhostUserGpuUpdate) {
                    .scanout_id = i,
                    .x = extents->x1,
                    .y = extents->y1,
                    .width = width,
                    .height = height,
                }
            };
            vg_send_msg(g, &vmsg, -1);
            vg_wait_ok(g);
        } else {
            size_t bpp =
                PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) / 8;
            size_t size = width * height * bpp;

            void *p = g_malloc(VHOST_USER_GPU_HDR_SIZE +
                               sizeof(VhostUserGpuUpdate) + size);
            VhostUserGpuMsg *msg = p;
            msg->request = VHOST_USER_GPU_UPDATE;
            msg->size = sizeof(VhostUserGpuUpdate) + size;
            msg->payload.update = (VhostUserGpuUpdate) {
                .scanout_id = i,
                .x = extents->x1,
                .y = extents->y1,
                .width = width,
                .height = height,
            };
            pixman_image_t *img =
                pixman_image_create_bits(pixman_image_get_format(res->image),
                                         msg->payload.update.width,
                                         msg->payload.update.height,
                                         p + offsetof(VhostUserGpuMsg,
                                                      payload.update.data),
                                         width * bpp);
            pixman_image_composite(PIXMAN_OP_SRC,
                                   res->image, NULL, img,
                                   extents->x1, extents->y1,
                                   0, 0, 0, 0,
                                   width, height);
            pixman_image_unref(img);
            vg_send_msg(g, msg, -1);
            g_free(msg);
        }
        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void
vg_process_cmd(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        vg_resource_create_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        vg_resource_unref(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        vg_resource_flush(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        vg_transfer_to_host_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        vg_set_scanout(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        vg_resource_attach_backing(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        vg_resource_detach_backing(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        vg_get_edid(vg, cmd);
        break;
    default:
        g_warning("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (cmd->state == VG_CMD_STATE_NEW) {
        vg_ctrl_response_nodata(vg, cmd, cmd->error ? cmd->error :
                                VIRTIO_GPU_RESP_OK_NODATA);
    }
}

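/*
 * Control queue handler.  Commands are popped and dispatched one at a
 * time; processing stops while wait_in is set (an async reply or ack is
 * outstanding) and resumes from the callbacks above.  Commands that did
 * not finish synchronously are parked on fenceq until completed.
 */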
static void
vg_handle_ctrl(VuDev *dev, int qidx)
{
    VuGpu *vg = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    struct virtio_gpu_ctrl_command *cmd = NULL;
    size_t len;

    for (;;) {
        if (vg->wait_in != 0) {
            return;
        }

        cmd = vu_queue_pop(dev, vq, sizeof(struct virtio_gpu_ctrl_command));
        if (!cmd) {
            break;
        }
        cmd->vq = vq;
        cmd->error = 0;
        cmd->state = VG_CMD_STATE_NEW;

        len = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                         0, &cmd->cmd_hdr, sizeof(cmd->cmd_hdr));
        if (len != sizeof(cmd->cmd_hdr)) {
            g_warning("%s: command size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cmd->cmd_hdr));
        }

        virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);
        g_debug("%d %s\n", cmd->cmd_hdr.type,
                vg_cmd_to_string(cmd->cmd_hdr.type));

        if (vg->virgl) {
            vg_virgl_process_cmd(vg, cmd);
        } else {
            vg_process_cmd(vg, cmd);
        }

        if (cmd->state != VG_CMD_STATE_FINISHED) {
            QTAILQ_INSERT_TAIL(&vg->fenceq, cmd, next);
            vg->inflight++;
        } else {
            free(cmd);
        }
    }
}

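/*
 * The virtio-gpu cursor is a fixed 64x64, 32-bit image; the checks below
 * enforce that before copying the pixels into the cursor-update payload.
 */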
static void
update_cursor_data_simple(VuGpu *g, uint32_t resource_id, gpointer data)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    g_return_if_fail(res != NULL);
    g_return_if_fail(pixman_image_get_width(res->image) == 64);
    g_return_if_fail(pixman_image_get_height(res->image) == 64);
    g_return_if_fail(
        PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) == 32);

    memcpy(data, pixman_image_get_data(res->image), 64 * 64 * sizeof(uint32_t));
}

static void
vg_process_cursor_cmd(VuGpu *g, struct virtio_gpu_update_cursor *cursor)
{
    switch (cursor->hdr.type) {
    case VIRTIO_GPU_CMD_MOVE_CURSOR: {
        VhostUserGpuMsg msg = {
            .request = cursor->resource_id ?
                VHOST_USER_GPU_CURSOR_POS : VHOST_USER_GPU_CURSOR_POS_HIDE,
            .size = sizeof(VhostUserGpuCursorPos),
            .payload.cursor_pos = {
                .scanout_id = cursor->pos.scanout_id,
                .x = cursor->pos.x,
                .y = cursor->pos.y,
            }
        };
        g_debug("%s: move", G_STRFUNC);
        vg_send_msg(g, &msg, -1);
        break;
    }
    case VIRTIO_GPU_CMD_UPDATE_CURSOR: {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_CURSOR_UPDATE,
            .size = sizeof(VhostUserGpuCursorUpdate),
            .payload.cursor_update = {
                .pos = {
                    .scanout_id = cursor->pos.scanout_id,
                    .x = cursor->pos.x,
                    .y = cursor->pos.y,
                },
                .hot_x = cursor->hot_x,
                .hot_y = cursor->hot_y,
            }
        };
        g_debug("%s: update", G_STRFUNC);
        if (g->virgl) {
            vg_virgl_update_cursor_data(g, cursor->resource_id,
                                        msg.payload.cursor_update.data);
        } else {
            update_cursor_data_simple(g, cursor->resource_id,
                                      msg.payload.cursor_update.data);
        }
        vg_send_msg(g, &msg, -1);
        break;
    }
    default:
        g_debug("%s: unknown cmd %d", G_STRFUNC, cursor->hdr.type);
        break;
    }
}

static void
vg_handle_cursor(VuDev *dev, int qidx)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    VuVirtqElement *elem;
    size_t len;
    struct virtio_gpu_update_cursor cursor;

    for (;;) {
        elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement));
        if (!elem) {
            break;
        }
        g_debug("cursor out:%d in:%d\n", elem->out_num, elem->in_num);

        len = iov_to_buf(elem->out_sg, elem->out_num,
                         0, &cursor, sizeof(cursor));
        if (len != sizeof(cursor)) {
            g_warning("%s: cursor size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cursor));
        } else {
            virtio_gpu_bswap_32(&cursor, sizeof(cursor));
            vg_process_cursor_cmd(g, &cursor);
        }
        vu_queue_push(dev, vq, elem, 0);
        vu_queue_notify(dev, vq);
        free(elem);
    }
}

static void
vg_panic(VuDev *dev, const char *msg)
{
    g_critical("%s\n", msg);
    exit(1);
}

static void
vg_queue_set_started(VuDev *dev, int qidx, bool started)
{
    VuVirtq *vq = vu_get_queue(dev, qidx);

    g_debug("queue started %d:%d\n", qidx, started);

    switch (qidx) {
    case 0:
        vu_set_queue_handler(dev, vq, started ? vg_handle_ctrl : NULL);
        break;
    case 1:
        vu_set_queue_handler(dev, vq, started ? vg_handle_cursor : NULL);
        break;
    default:
        break;
    }
}

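/*
 * vhost-user-gpu protocol feature negotiation, kicked off from
 * VHOST_USER_GPU_SET_SOCKET: the GET_PROTOCOL_FEATURES reply is consumed
 * asynchronously here, masked to the features we understand (EDID,
 * DMABUF2), and echoed back with SET_PROTOCOL_FEATURES.
 */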
static gboolean
protocol_features_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    const uint64_t protocol_edid = (1 << VHOST_USER_GPU_PROTOCOL_F_EDID);
    const uint64_t protocol_dmabuf2 = (1 << VHOST_USER_GPU_PROTOCOL_F_DMABUF2);
    VuGpu *g = user_data;
    uint64_t protocol_features;
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
    };

    if (!vg_recv_msg(g, msg.request,
                     sizeof(protocol_features), &protocol_features)) {
        return G_SOURCE_CONTINUE;
    }

    protocol_features &= (protocol_edid | protocol_dmabuf2);

    msg = (VhostUserGpuMsg) {
        .request = VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
        .size = sizeof(uint64_t),
        .payload.u64 = protocol_features,
    };
    vg_send_msg(g, &msg, -1);

    g->wait_in = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    if (g->edid_inited && !(protocol_features & protocol_edid)) {
        g_printerr("EDID feature set by the frontend but it does not support "
                   "the EDID vhost-user-gpu protocol.\n");
        exit(EXIT_FAILURE);
    }

    g->use_modifiers = !!(protocol_features & protocol_dmabuf2);

    return G_SOURCE_REMOVE;
}

static void
set_gpu_protocol_features(VuGpu *g)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
    };

    vg_send_msg(g, &msg, -1);
    assert(g->wait_in == 0);
    g->wait_in = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               protocol_features_cb, g);
}

static int
vg_process_msg(VuDev *dev, VhostUserMsg *msg, int *do_reply)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    switch (msg->request) {
    case VHOST_USER_GPU_SET_SOCKET: {
        g_return_val_if_fail(msg->fd_num == 1, 1);
        g_return_val_if_fail(g->sock_fd == -1, 1);
        g->sock_fd = msg->fds[0];
        set_gpu_protocol_features(g);
        return 1;
    }
    default:
        return 0;
    }

    return 0;
}

static uint64_t
vg_get_features(VuDev *dev)
{
    uint64_t features = 0;

    if (opt_virgl) {
        features |= 1 << VIRTIO_GPU_F_VIRGL;
    }
    features |= 1 << VIRTIO_GPU_F_EDID;

    return features;
}

static void
vg_set_features(VuDev *dev, uint64_t features)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    bool virgl = features & (1 << VIRTIO_GPU_F_VIRGL);

    if (virgl && !g->virgl_inited) {
        if (!vg_virgl_init(g)) {
            vg_panic(dev, "Failed to initialize virgl");
        }
        g->virgl_inited = true;
    }

    g->edid_inited = !!(features & (1 << VIRTIO_GPU_F_EDID));

    g->virgl = virgl;
}

static int
vg_get_config(VuDev *dev, uint8_t *config, uint32_t len)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    if (len > sizeof(struct virtio_gpu_config)) {
        return -1;
    }

    if (opt_virgl) {
        g->virtio_config.num_capsets = vg_virgl_get_num_capsets();
    }

    memcpy(config, &g->virtio_config, len);

    return 0;
}

static int
vg_set_config(VuDev *dev, const uint8_t *data,
              uint32_t offset, uint32_t size,
              uint32_t flags)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    struct virtio_gpu_config *config = (struct virtio_gpu_config *)data;

    if (config->events_clear) {
        g->virtio_config.events_read &= ~config->events_clear;
    }

    return 0;
}

static const VuDevIface vuiface = {
    .set_features = vg_set_features,
    .get_features = vg_get_features,
    .queue_set_started = vg_queue_set_started,
    .process_msg = vg_process_msg,
    .get_config = vg_get_config,
    .set_config = vg_set_config,
};

static void
vg_destroy(VuGpu *g)
{
    struct virtio_gpu_simple_resource *res, *tmp;

    vug_deinit(&g->dev);

    vg_sock_fd_close(g);

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        vg_resource_destroy(g, res);
    }

    vugbm_device_destroy(&g->gdev);
}

static GOptionEntry entries[] = {
    { "print-capabilities", 'c', 0, G_OPTION_ARG_NONE, &opt_print_caps,
      "Print capabilities", NULL },
    { "fd", 'f', 0, G_OPTION_ARG_INT, &opt_fdnum,
      "Use inherited fd socket", "FDNUM" },
    { "socket-path", 's', 0, G_OPTION_ARG_FILENAME, &opt_socket_path,
      "Use UNIX socket path", "PATH" },
    { "render-node", 'r', 0, G_OPTION_ARG_FILENAME, &opt_render_node,
      "Specify DRM render node", "PATH" },
    { "virgl", 'v', 0, G_OPTION_ARG_NONE, &opt_virgl,
      "Turn virgl rendering on", NULL },
    { NULL, }
};

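/*
 * Entry point: parse options, open the optional DRM render node, accept
 * exactly one of --socket-path or --fd as the vhost-user socket, then hand
 * the connection to libvhost-user-glib and run the GLib main loop.
 */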
int
main(int argc, char *argv[])
{
    GOptionContext *context;
    GError *error = NULL;
    GMainLoop *loop = NULL;
    int fd;
    VuGpu g = { .sock_fd = -1, .drm_rnode_fd = -1 };

    QTAILQ_INIT(&g.reslist);
    QTAILQ_INIT(&g.fenceq);

    context = g_option_context_new("QEMU vhost-user-gpu");
    g_option_context_add_main_entries(context, entries, NULL);
    if (!g_option_context_parse(context, &argc, &argv, &error)) {
        g_printerr("Option parsing failed: %s\n", error->message);
        exit(EXIT_FAILURE);
    }
    g_option_context_free(context);

    if (opt_print_caps) {
        g_print("{\n");
        g_print("  \"type\": \"gpu\",\n");
        g_print("  \"features\": [\n");
        g_print("    \"render-node\",\n");
        g_print("    \"virgl\"\n");
        g_print("  ]\n");
        g_print("}\n");
        exit(EXIT_SUCCESS);
    }

    g.drm_rnode_fd = qemu_drm_rendernode_open(opt_render_node);
    if (opt_render_node && g.drm_rnode_fd == -1) {
        g_printerr("Failed to open DRM rendernode.\n");
        exit(EXIT_FAILURE);
    }

    vugbm_device_init(&g.gdev, g.drm_rnode_fd);

    if ((!!opt_socket_path + (opt_fdnum != -1)) != 1) {
        g_printerr("Please specify either --fd or --socket-path\n");
        exit(EXIT_FAILURE);
    }

    if (opt_socket_path) {
        int lsock = unix_listen(opt_socket_path, &error_fatal);
        if (lsock < 0) {
            g_printerr("Failed to listen on %s.\n", opt_socket_path);
            exit(EXIT_FAILURE);
        }
        fd = accept(lsock, NULL, NULL);
        close(lsock);
    } else {
        fd = opt_fdnum;
    }
    if (fd == -1) {
        g_printerr("Invalid vhost-user socket.\n");
        exit(EXIT_FAILURE);
    }

    if (!vug_init(&g.dev, VHOST_USER_GPU_MAX_QUEUES, fd, vg_panic, &vuiface)) {
        g_printerr("Failed to initialize libvhost-user-glib.\n");
        exit(EXIT_FAILURE);
    }

    loop = g_main_loop_new(NULL, FALSE);
    g_main_loop_run(loop);
    g_main_loop_unref(loop);

    vg_destroy(&g);
    if (g.drm_rnode_fd >= 0) {
        close(g.drm_rnode_fd);
    }

    return 0;
}