/*
 * vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2018
 *
 * Authors:
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/sockets.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-gpu.h"
#include "chardev/char-fe.h"
#include "qapi/error.h"
#include "migration/blocker.h"

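/*
 * The vhost-user-gpu backend renders in a separate process: scanout,
 * cursor and damage updates travel back to QEMU over a dedicated
 * socket pair (set up in vhost_user_gpu_do_set_socket() below), using
 * the message types declared here.  The wire protocol is documented in
 * docs/interop/vhost-user-gpu.rst.
 */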
typedef enum VhostUserGpuRequest {
    VHOST_USER_GPU_NONE = 0,
    VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
    VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
    VHOST_USER_GPU_GET_DISPLAY_INFO,
    VHOST_USER_GPU_CURSOR_POS,
    VHOST_USER_GPU_CURSOR_POS_HIDE,
    VHOST_USER_GPU_CURSOR_UPDATE,
    VHOST_USER_GPU_SCANOUT,
    VHOST_USER_GPU_UPDATE,
    VHOST_USER_GPU_DMABUF_SCANOUT,
    VHOST_USER_GPU_DMABUF_UPDATE,
    VHOST_USER_GPU_GET_EDID,
    VHOST_USER_GPU_DMABUF_SCANOUT2,
} VhostUserGpuRequest;

typedef struct VhostUserGpuDisplayInfoReply {
    struct virtio_gpu_resp_display_info info;
} VhostUserGpuDisplayInfoReply;

typedef struct VhostUserGpuCursorPos {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
} QEMU_PACKED VhostUserGpuCursorPos;

typedef struct VhostUserGpuCursorUpdate {
    VhostUserGpuCursorPos pos;
    uint32_t hot_x;
    uint32_t hot_y;
    uint32_t data[64 * 64];
} QEMU_PACKED VhostUserGpuCursorUpdate;

typedef struct VhostUserGpuScanout {
    uint32_t scanout_id;
    uint32_t width;
    uint32_t height;
} QEMU_PACKED VhostUserGpuScanout;

typedef struct VhostUserGpuUpdate {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
    uint32_t width;
    uint32_t height;
    uint8_t data[];
} QEMU_PACKED VhostUserGpuUpdate;

typedef struct VhostUserGpuDMABUFScanout {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
    uint32_t width;
    uint32_t height;
    uint32_t fd_width;
    uint32_t fd_height;
    uint32_t fd_stride;
    uint32_t fd_flags;
    int fd_drm_fourcc;
} QEMU_PACKED VhostUserGpuDMABUFScanout;

typedef struct VhostUserGpuDMABUFScanout2 {
    struct VhostUserGpuDMABUFScanout dmabuf_scanout;
    uint64_t modifier;
} QEMU_PACKED VhostUserGpuDMABUFScanout2;

typedef struct VhostUserGpuEdidRequest {
    uint32_t scanout_id;
} QEMU_PACKED VhostUserGpuEdidRequest;

typedef struct VhostUserGpuMsg {
    uint32_t request; /* VhostUserGpuRequest */
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
        VhostUserGpuCursorPos cursor_pos;
        VhostUserGpuCursorUpdate cursor_update;
        VhostUserGpuScanout scanout;
        VhostUserGpuUpdate update;
        VhostUserGpuDMABUFScanout dmabuf_scanout;
        VhostUserGpuDMABUFScanout2 dmabuf_scanout2;
        VhostUserGpuEdidRequest edid_req;
        struct virtio_gpu_resp_edid resp_edid;
        struct virtio_gpu_resp_display_info display_info;
        uint64_t u64;
    } payload;
} QEMU_PACKED VhostUserGpuMsg;

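/*
 * The dummy instance below exists only so sizeof() can be applied to
 * the header fields: each message starts with a 12-byte header
 * (request, flags, size) followed by a payload of 'size' bytes.
 */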
static VhostUserGpuMsg m __attribute__ ((unused));
#define VHOST_USER_GPU_HDR_SIZE \
    (sizeof(m.request) + sizeof(m.size) + sizeof(m.flags))

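/*
 * Messages carrying VHOST_USER_GPU_MSG_FLAG_REPLY answer a request from
 * the peer.  The feature bits below are what QEMU advertises in its
 * GET_PROTOCOL_FEATURES reply: EDID support and the DMABUF_SCANOUT2
 * message, which extends DMABUF_SCANOUT with a DRM format modifier.
 */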
#define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4

#define VHOST_USER_GPU_PROTOCOL_F_EDID 0
#define VHOST_USER_GPU_PROTOCOL_F_DMABUF2 1

static void vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked);

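/*
 * CURSOR_UPDATE carries a full 64x64, 32-bit-per-pixel cursor image
 * plus hotspot coordinates; CURSOR_POS and CURSOR_POS_HIDE only move
 * or hide the pointer on the given scanout.
 */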
static void
vhost_user_gpu_handle_cursor(VhostUserGPU *g, VhostUserGpuMsg *msg)
{
    VhostUserGpuCursorPos *pos = &msg->payload.cursor_pos;
    struct virtio_gpu_scanout *s;

    if (pos->scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[pos->scanout_id];

    if (msg->request == VHOST_USER_GPU_CURSOR_UPDATE) {
        VhostUserGpuCursorUpdate *up = &msg->payload.cursor_update;
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = up->hot_x;
        s->current_cursor->hot_y = up->hot_y;

        memcpy(s->current_cursor->data, up->data,
               64 * 64 * sizeof(uint32_t));

        dpy_cursor_define(s->con, s->current_cursor);
    }

    dpy_mouse_set(s->con, pos->x, pos->y,
                  msg->request != VHOST_USER_GPU_CURSOR_POS_HIDE);
}

static void
vhost_user_gpu_send_msg(VhostUserGPU *g, const VhostUserGpuMsg *msg)
{
    qemu_chr_fe_write(&g->vhost_chr, (uint8_t *)msg,
                      VHOST_USER_GPU_HDR_SIZE + msg->size);
}

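/*
 * Acknowledge a DMABUF_UPDATE once it has been flushed to the display:
 * per docs/interop/vhost-user-gpu.rst, the backend waits for this empty
 * reply before submitting the next frame.
 */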
static void
vhost_user_gpu_unblock(VhostUserGPU *g)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_DMABUF_UPDATE,
        .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
    };

    vhost_user_gpu_send_msg(g, &msg);
}

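/*
 * Dispatch a non-cursor message from the backend: answer the GET_*
 * requests and route scanout/update messages to the QEMU console.
 */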
static void
vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
{
    QemuConsole *con = NULL;
    struct virtio_gpu_scanout *s;

    switch (msg->request) {
    case VHOST_USER_GPU_GET_PROTOCOL_FEATURES: {
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(uint64_t),
            .payload = {
                .u64 = (1 << VHOST_USER_GPU_PROTOCOL_F_EDID) |
                       (1 << VHOST_USER_GPU_PROTOCOL_F_DMABUF2)
            }
        };

        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_SET_PROTOCOL_FEATURES: {
        break;
    }
    case VHOST_USER_GPU_GET_DISPLAY_INFO: {
        struct virtio_gpu_resp_display_info display_info = { {} };
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(struct virtio_gpu_resp_display_info),
        };

        display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
        virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
        memcpy(&reply.payload.display_info, &display_info,
               sizeof(display_info));
        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_GET_EDID: {
        VhostUserGpuEdidRequest *m = &msg->payload.edid_req;
        struct virtio_gpu_resp_edid resp = { {} };
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(reply.payload.resp_edid),
        };

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            error_report("invalid scanout: %d", m->scanout_id);
            break;
        }

        resp.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
        virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), m->scanout_id, &resp);
        memcpy(&reply.payload.resp_edid, &resp, sizeof(resp));
        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_SCANOUT: {
        VhostUserGpuScanout *m = &msg->payload.scanout;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            return;
        }

        g->parent_obj.enable = 1;
        s = &g->parent_obj.scanout[m->scanout_id];
        con = s->con;

        if (m->width == 0) {
            dpy_gfx_replace_surface(con, NULL);
        } else {
            s->ds = qemu_create_displaysurface(m->width, m->height);
            /* replace surface on next update */
        }

        break;
    }
    case VHOST_USER_GPU_DMABUF_SCANOUT2:
    case VHOST_USER_GPU_DMABUF_SCANOUT: {
        VhostUserGpuDMABUFScanout *m = &msg->payload.dmabuf_scanout;
        int fd = qemu_chr_fe_get_msgfd(&g->vhost_chr);
        uint64_t modifier = 0;
        QemuDmaBuf *dmabuf;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            error_report("invalid scanout: %d", m->scanout_id);
            if (fd >= 0) {
                close(fd);
            }
            break;
        }

        g->parent_obj.enable = 1;
        con = g->parent_obj.scanout[m->scanout_id].con;
        dmabuf = g->dmabuf[m->scanout_id];

        if (dmabuf) {
            qemu_dmabuf_close(dmabuf);
            dpy_gl_release_dmabuf(con, dmabuf);
            g_clear_pointer(&dmabuf, qemu_dmabuf_free);
        }

        if (fd == -1) {
            dpy_gl_scanout_disable(con);
            g->dmabuf[m->scanout_id] = NULL;
            break;
        }

        if (msg->request == VHOST_USER_GPU_DMABUF_SCANOUT2) {
            VhostUserGpuDMABUFScanout2 *m2 = &msg->payload.dmabuf_scanout2;
            modifier = m2->modifier;
        }

        dmabuf = qemu_dmabuf_new(m->width, m->height,
                                 m->fd_stride, 0, 0,
                                 m->fd_width, m->fd_height,
                                 m->fd_drm_fourcc, modifier,
                                 fd, false, m->fd_flags &
                                 VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP);

        dpy_gl_scanout_dmabuf(con, dmabuf);
        g->dmabuf[m->scanout_id] = dmabuf;
        break;
    }
    case VHOST_USER_GPU_DMABUF_UPDATE: {
        VhostUserGpuUpdate *m = &msg->payload.update;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs ||
            !g->parent_obj.scanout[m->scanout_id].con) {
            error_report("invalid scanout update: %d", m->scanout_id);
            vhost_user_gpu_unblock(g);
            break;
        }

        con = g->parent_obj.scanout[m->scanout_id].con;
        if (!console_has_gl(con)) {
            error_report("console doesn't support GL!");
            vhost_user_gpu_unblock(g);
            break;
        }
        g->backend_blocked = true;
        dpy_gl_update(con, m->x, m->y, m->width, m->height);
        break;
    }
#ifdef CONFIG_PIXMAN
    case VHOST_USER_GPU_UPDATE: {
        VhostUserGpuUpdate *m = &msg->payload.update;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            break;
        }
        s = &g->parent_obj.scanout[m->scanout_id];
        con = s->con;
        pixman_image_t *image =
            pixman_image_create_bits(PIXMAN_x8r8g8b8,
                                     m->width,
                                     m->height,
                                     (uint32_t *)m->data,
                                     m->width * 4);

        pixman_image_composite(PIXMAN_OP_SRC,
                               image, NULL, s->ds->image,
                               0, 0, 0, 0, m->x, m->y, m->width, m->height);

        pixman_image_unref(image);
        if (qemu_console_surface(con) != s->ds) {
            dpy_gfx_replace_surface(con, s->ds);
        } else {
            dpy_gfx_update(con, m->x, m->y, m->width, m->height);
        }
        break;
    }
#endif
    default:
        g_warning("unhandled message %d %d", msg->request, msg->size);
    }

    if (con && qemu_console_is_gl_blocked(con)) {
        vhost_user_gpu_update_blocked(g, true);
    }
}

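/*
 * Read one complete message from the backend socket: the three header
 * fields first, then a payload of the advertised size.  Cursor messages
 * are handled on a separate path from display messages.
 */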
static void
vhost_user_gpu_chr_read(void *opaque)
{
    VhostUserGPU *g = opaque;
    VhostUserGpuMsg *msg = NULL;
    VhostUserGpuRequest request;
    uint32_t size, flags;
    int r;

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&request, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg header: %d, %d", r, errno);
        goto end;
    }

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&flags, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg flags");
        goto end;
    }

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&size, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg size");
        goto end;
    }

    msg = g_malloc(VHOST_USER_GPU_HDR_SIZE + size);

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&msg->payload, size);
    if (r != size) {
        error_report("failed to read msg payload %d != %d", r, size);
        goto end;
    }

    msg->request = request;
    msg->flags = flags;
    msg->size = size;

    if (request == VHOST_USER_GPU_CURSOR_UPDATE ||
        request == VHOST_USER_GPU_CURSOR_POS ||
        request == VHOST_USER_GPU_CURSOR_POS_HIDE) {
        vhost_user_gpu_handle_cursor(g, msg);
    } else {
        vhost_user_gpu_handle_display(g, msg);
    }

end:
    g_free(msg);
}

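/*
 * Blocking is implemented by dropping the fd read handler: further
 * messages simply queue up in the socket until the console unblocks us.
 */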
static void
vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked)
{
    qemu_set_fd_handler(g->vhost_gpu_fd,
                        blocked ? NULL : vhost_user_gpu_chr_read, NULL, g);
}

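/*
 * Called back by the console code once GL drawing has completed: send
 * the pending unblock reply, if any, and resume reading messages.
 */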
static void
vhost_user_gpu_gl_flushed(VirtIOGPUBase *b)
{
    VhostUserGPU *g = VHOST_USER_GPU(b);

    if (g->backend_blocked) {
        vhost_user_gpu_unblock(g);
        g->backend_blocked = false;
    }

    vhost_user_gpu_update_blocked(g, false);
}

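/*
 * Create the socket pair carrying the messages above: one end is
 * wrapped in a socket chardev read by QEMU, the other is handed to the
 * backend via VHOST_USER_GPU_SET_SOCKET and closed locally once sent.
 */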
static bool
vhost_user_gpu_do_set_socket(VhostUserGPU *g, Error **errp)
{
    Chardev *chr;
    int sv[2];

    if (qemu_socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        error_setg_errno(errp, errno, "socketpair() failed");
        return false;
    }

    chr = CHARDEV(object_new(TYPE_CHARDEV_SOCKET));
    if (!chr || qemu_chr_add_client(chr, sv[0]) == -1) {
        error_setg(errp, "Failed to make socket chardev");
        goto err;
    }
    if (!qemu_chr_fe_init(&g->vhost_chr, chr, errp)) {
        goto err;
    }
    if (vhost_user_gpu_set_socket(&g->vhost->dev, sv[1]) < 0) {
        error_setg(errp, "Failed to set vhost-user-gpu socket");
        qemu_chr_fe_deinit(&g->vhost_chr, false);
        goto err;
    }

    g->vhost_gpu_fd = sv[0];
    vhost_user_gpu_update_blocked(g, false);
    close(sv[1]);
    return true;

err:
    close(sv[0]);
    close(sv[1]);
    if (chr) {
        object_unref(OBJECT(chr));
    }
    return false;
}

static void
vhost_user_gpu_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    struct virtio_gpu_config *vgconfig =
        (struct virtio_gpu_config *)config_data;
    Error *local_err = NULL;
    int ret;

    memset(config_data, 0, sizeof(struct virtio_gpu_config));

    ret = vhost_dev_get_config(&g->vhost->dev,
                               config_data, sizeof(struct virtio_gpu_config),
                               &local_err);
    if (ret) {
        error_report_err(local_err);
        return;
    }

    /* these fields are managed by QEMU */
    vgconfig->num_scanouts = b->virtio_config.num_scanouts;
    vgconfig->events_read = b->virtio_config.events_read;
    vgconfig->events_clear = b->virtio_config.events_clear;
}

static void
vhost_user_gpu_set_config(VirtIODevice *vdev,
                          const uint8_t *config_data)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config_data;
    int ret;

    if (vgconfig->events_clear) {
        b->virtio_config.events_read &= ~vgconfig->events_clear;
    }

    ret = vhost_dev_set_config(&g->vhost->dev, config_data,
                               0, sizeof(struct virtio_gpu_config),
                               VHOST_SET_CONFIG_TYPE_FRONTEND);
    if (ret) {
        error_report("vhost-user-gpu: set device config space failed");
        return;
    }
}

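/*
 * DRIVER_OK (with the VM running) brings the device up: connect the
 * display socket and start the vhost backend.  Any other status tears
 * the display channel down and stops the backend.
 */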
static void
vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    Error *err = NULL;

    if (val & VIRTIO_CONFIG_S_DRIVER_OK && vdev->vm_running) {
        if (!vhost_user_gpu_do_set_socket(g, &err)) {
            error_report_err(err);
            return;
        }
        vhost_user_backend_start(g->vhost);
    } else {
        /* unblock any wait and stop processing */
        if (g->vhost_gpu_fd != -1) {
            vhost_user_gpu_update_blocked(g, true);
            qemu_chr_fe_deinit(&g->vhost_chr, true);
            g->vhost_gpu_fd = -1;
        }
        vhost_user_backend_stop(g->vhost);
    }
}

static bool
vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    /*
     * VIRTIO_CONFIG_IRQ_IDX (-1) identifies the config change interrupt
     * rather than a virtqueue; it is not handled through vhost, so
     * report it as not pending here.
     */

    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        return false;
    }
    return vhost_virtqueue_pending(&g->vhost->dev, idx);
}

static void
vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    /*
     * VIRTIO_CONFIG_IRQ_IDX (-1) identifies the config change interrupt
     * rather than a virtqueue; its masking is not handled through vhost,
     * so there is nothing to do here.
     */

    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        return;
    }
    vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
}

static void
vhost_user_gpu_instance_init(Object *obj)
{
    VhostUserGPU *g = VHOST_USER_GPU(obj);

    g->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND));
    object_property_add_alias(obj, "chardev",
                              OBJECT(g->vhost), "chardev");
}

static void
vhost_user_gpu_instance_finalize(Object *obj)
{
    VhostUserGPU *g = VHOST_USER_GPU(obj);

    object_unref(OBJECT(g->vhost));
}

static void
vhost_user_gpu_reset(VirtIODevice *vdev)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));

    vhost_user_backend_stop(g->vhost);
}

static int
vhost_user_gpu_config_change(struct vhost_dev *dev)
{
    error_report("vhost-user-gpu: unhandled backend config change");
    return -1;
}

static const VhostDevConfigOps config_ops = {
    .vhost_dev_config_notifier = vhost_user_gpu_config_change,
};

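/*
 * Realize: initialize the vhost-user backend with the two virtio-gpu
 * virtqueues (control and cursor), then mirror the features negotiated
 * by the backend (virgl, EDID) into the base device configuration.
 */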
static void
vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VhostUserGPU *g = VHOST_USER_GPU(qdev);
    VirtIODevice *vdev = VIRTIO_DEVICE(g);

    vhost_dev_set_config_notifier(&g->vhost->dev, &config_ops);
    if (vhost_user_backend_dev_init(g->vhost, vdev, 2, errp) < 0) {
        return;
    }

    /* existing backend may send DMABUF, so let's add that requirement */
    g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_DMABUF_ENABLED;
    if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_VIRGL)) {
        g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED;
    }
    if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_EDID)) {
        g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_EDID_ENABLED;
    } else {
        error_report("EDID requested but the backend doesn't support it.");
        g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_EDID_ENABLED);
    }

    if (!virtio_gpu_base_device_realize(qdev, NULL, NULL, errp)) {
        return;
    }

    g->vhost_gpu_fd = -1;
}

static struct vhost_dev *vhost_user_gpu_get_vhost(VirtIODevice *vdev)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    return &g->vhost->dev;
}

static Property vhost_user_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VhostUserGPU, parent_obj.conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void
vhost_user_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);

    vgc->gl_flushed = vhost_user_gpu_gl_flushed;

    vdc->realize = vhost_user_gpu_device_realize;
    vdc->reset = vhost_user_gpu_reset;
    vdc->set_status = vhost_user_gpu_set_status;
    vdc->guest_notifier_mask = vhost_user_gpu_guest_notifier_mask;
    vdc->guest_notifier_pending = vhost_user_gpu_guest_notifier_pending;
    vdc->get_config = vhost_user_gpu_get_config;
    vdc->set_config = vhost_user_gpu_set_config;
    vdc->get_vhost = vhost_user_gpu_get_vhost;

    device_class_set_props(dc, vhost_user_gpu_properties);
}

static const TypeInfo vhost_user_gpu_info = {
    .name = TYPE_VHOST_USER_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VhostUserGPU),
    .instance_init = vhost_user_gpu_instance_init,
    .instance_finalize = vhost_user_gpu_instance_finalize,
    .class_init = vhost_user_gpu_class_init,
};
module_obj(TYPE_VHOST_USER_GPU);
module_kconfig(VHOST_USER_GPU);

static void vhost_user_gpu_register_types(void)
{
    type_register_static(&vhost_user_gpu_info);
}

type_init(vhost_user_gpu_register_types)