/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"

#include "ui/egl-helpers.h"

#include <virglrenderer.h>

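/*
 * Host-side state for a virgl resource: the common simple-resource
 * bookkeeping plus, for mapped blob resources, the MemoryRegion that
 * exposes the host mapping to the guest.
 */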
struct virtio_gpu_virgl_resource {
    struct virtio_gpu_simple_resource base;
    MemoryRegion *mr;
};

static struct virtio_gpu_virgl_resource *
virtio_gpu_virgl_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return NULL;
    }

    return container_of(res, struct virtio_gpu_virgl_resource, base);
}

#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
static void *
virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
{
    return qemu_egl_display;
}
#endif

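/*
 * Hostmem (blob mapping) support, available with virglrenderer 1.x and newer.
 * A mapped blob is wrapped in a self-owned MemoryRegion; that region is freed
 * on the RCU thread, so the free callback only marks the mapping as gone and
 * defers further command processing to a bottom half on the main loop, where
 * virglrenderer can be used safely.
 */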
#if VIRGL_VERSION_MAJOR >= 1
struct virtio_gpu_virgl_hostmem_region {
    MemoryRegion mr;
    struct VirtIOGPU *g;
    bool finish_unmapping;
};

static struct virtio_gpu_virgl_hostmem_region *
to_hostmem_region(MemoryRegion *mr)
{
    return container_of(mr, struct virtio_gpu_virgl_hostmem_region, mr);
}

static void virtio_gpu_virgl_resume_cmdq_bh(void *opaque)
{
    VirtIOGPU *g = opaque;

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_virgl_hostmem_region_free(void *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    struct virtio_gpu_virgl_hostmem_region *vmr;
    VirtIOGPUBase *b;
    VirtIOGPUGL *gl;

    vmr = to_hostmem_region(mr);
    vmr->finish_unmapping = true;

    b = VIRTIO_GPU_BASE(vmr->g);
    b->renderer_blocked--;

    /*
     * memory_region_unref() is executed from RCU thread context, while
     * virglrenderer works only on the main-loop thread that is holding the
     * GL context.
     */
    gl = VIRTIO_GPU_GL(vmr->g);
    qemu_bh_schedule(gl->cmdq_resume_bh);
}

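/*
 * Map a blob resource into the hostmem memory region at the guest-requested
 * offset. The resulting MemoryRegion owns itself and is torn down
 * asynchronously by virtio_gpu_virgl_hostmem_region_free() above.
 */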
static int
virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g,
                                   struct virtio_gpu_virgl_resource *res,
                                   uint64_t offset)
{
    struct virtio_gpu_virgl_hostmem_region *vmr;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    MemoryRegion *mr;
    uint64_t size;
    void *data;
    int ret;

    if (!virtio_gpu_hostmem_enabled(b->conf)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: hostmem disabled\n", __func__);
        return -EOPNOTSUPP;
    }

    ret = virgl_renderer_resource_map(res->base.resource_id, &data, &size);
    if (ret) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map virgl resource: %s\n",
                      __func__, strerror(-ret));
        return ret;
    }

    vmr = g_new0(struct virtio_gpu_virgl_hostmem_region, 1);
    vmr->g = g;

    mr = &vmr->mr;
    memory_region_init_ram_ptr(mr, OBJECT(mr), "blob", size, data);
    memory_region_add_subregion(&b->hostmem, offset, mr);
    memory_region_set_enabled(mr, true);

    /*
     * The MR can outlive the resource if a reference to the MR is held
     * outside of virtio-gpu. To prevent the resource from being unmapped
     * while the MR is still alive, which would leave the data pointer
     * dangling, block virtio-gpu command processing until the MR is fully
     * unreferenced and freed.
     */
    OBJECT(mr)->free = virtio_gpu_virgl_hostmem_region_free;

    res->mr = mr;

    return 0;
}

static int
virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g,
                                     struct virtio_gpu_virgl_resource *res,
                                     bool *cmd_suspended)
{
    struct virtio_gpu_virgl_hostmem_region *vmr;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    MemoryRegion *mr = res->mr;
    int ret;

    if (!mr) {
        return 0;
    }

    vmr = to_hostmem_region(res->mr);

    /*
     * Perform async unmapping in 3 steps:
     *
     * 1. Begin async unmapping with memory_region_del_subregion()
     *    and suspend/block cmd processing.
     * 2. Wait for res->mr to be freed and cmd processing resumed
     *    asynchronously by virtio_gpu_virgl_hostmem_region_free().
     * 3. Finish the unmapping with final virgl_renderer_resource_unmap().
     */
    if (vmr->finish_unmapping) {
        res->mr = NULL;
        g_free(vmr);

        ret = virgl_renderer_resource_unmap(res->base.resource_id);
        if (ret) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: failed to unmap virgl resource: %s\n",
                          __func__, strerror(-ret));
            return ret;
        }
    } else {
        *cmd_suspended = true;

        /* the renderer will be unblocked once the MR is freed */
        b->renderer_blocked++;

        /* the memory region owns the res->mr object and frees it by itself */
        memory_region_set_enabled(mr, false);
        memory_region_del_subregion(&b->hostmem, mr);
        object_unparent(OBJECT(mr));
    }

    return 0;
}
#endif

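/*
 * Create a host 2D resource. The hard-coded target/bind values below appear
 * to correspond to the gallium PIPE_TEXTURE_2D target and the render-target
 * bind flag, matching what a 2D scanout-capable resource requires.
 */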
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;
    struct virtio_gpu_virgl_resource *res;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_virgl_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_virgl_resource, 1);
    res->base.width = c2d.width;
    res->base.height = c2d.height;
    res->base.format = c2d.format;
    res->base.resource_id = c2d.resource_id;
    res->base.dmabuf_fd = -1;
    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);

    args.handle = c2d.resource_id;
    args.target = 2;
    args.format = c2d.format;
    args.bind = (1 << 1);
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;
    struct virtio_gpu_virgl_resource *res;

    VIRTIO_GPU_FILL_CMD(c3d);
    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    if (c3d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_virgl_find_resource(g, c3d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c3d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_virgl_resource, 1);
    res->base.width = c3d.width;
    res->base.height = c3d.height;
    res->base.format = c3d.format;
    res->base.resource_id = c3d.resource_id;
    res->base.dmabuf_fd = -1;
    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}

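/*
 * Destroy a resource. If the resource is a mapped blob, the unmap runs
 * asynchronously and the command is suspended; it is re-processed once the
 * mapping's MemoryRegion has been freed and then completes the unref.
 */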
static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     bool *cmd_suspended)
{
    struct virtio_gpu_resource_unref unref;
    struct virtio_gpu_virgl_resource *res;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_virgl_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

#if VIRGL_VERSION_MAJOR >= 1
    if (virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended)) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
    if (*cmd_suspended) {
        return;
    }
#endif

    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(unref.resource_id);

    QTAILQ_REMOVE(&g->reslist, &res->base, next);

    g_free(res);
}

static void virgl_cmd_context_create(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    if (cc.context_init) {
        if (!virtio_gpu_context_init_enabled(g->parent_obj.conf)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: context_init disabled",
                          __func__);
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }

#if VIRGL_VERSION_MAJOR >= 1
        virgl_renderer_context_create_with_flags(cc.hdr.ctx_id,
                                                 cc.context_init,
                                                 cc.nlen,
                                                 cc.debug_name);
        return;
#endif
    }

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen, cc.debug_name);
}

static void virgl_cmd_context_destroy(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
                                   int width, int height)
{
    if (!g->parent_obj.scanout[idx].con) {
        return;
    }

    dpy_gl_update(g->parent_obj.scanout[idx].con, x, y, width, height);
}

static void virgl_cmd_resource_flush(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        if (g->parent_obj.scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    }
}

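/*
 * Point a scanout at a virgl resource: query the backing texture info from
 * virglrenderer (including the optional D3D texture with virglrenderer 1.x)
 * and hand it to the display backend, or disable the scanout when no
 * resource or rectangle is given.
 */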
static void virgl_cmd_set_scanout(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    int ret;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    g->parent_obj.enable = 1;

    if (ss.resource_id && ss.r.width && ss.r.height) {
        struct virgl_renderer_resource_info info;
        void *d3d_tex2d = NULL;

#if VIRGL_VERSION_MAJOR >= 1
        struct virgl_renderer_resource_info_ext ext;
        memset(&ext, 0, sizeof(ext));
        ret = virgl_renderer_resource_get_info_ext(ss.resource_id, &ext);
        info = ext.base;
        d3d_tex2d = ext.d3d_tex2d;
#else
        memset(&info, 0, sizeof(info));
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
#endif
        if (ret) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal resource specified %d\n",
                          __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        qemu_console_resize(g->parent_obj.scanout[ss.scanout_id].con,
                            ss.r.width, ss.r.height);
        virgl_renderer_force_ctx_0();
        dpy_gl_scanout_texture(
            g->parent_obj.scanout[ss.scanout_id].con, info.tex_id,
            info.flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP,
            info.width, info.height,
            ss.r.x, ss.r.y, ss.r.width, ss.r.height,
            d3d_tex2d);
    } else {
        dpy_gfx_replace_surface(
            g->parent_obj.scanout[ss.scanout_id].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[ss.scanout_id].con);
    }
    g->parent_obj.scanout[ss.scanout_id].resource_id = ss.resource_id;
}

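/*
 * Copy a guest command stream out of the request's scatter-gather list and
 * submit it to virglrenderer; the submit size is passed in dwords.
 */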
static void virgl_cmd_submit_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)",
                      __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        g->stats.req_3d++;
        g->stats.bytes_3d += cs.size;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}

static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VIRTIO_GPU_FILL_CMD(tf3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}

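/*
 * Translate the guest's backing-store entries into host iovecs and attach
 * them to the virgl resource; the mapping is released again if the attach
 * fails.
 */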
static void virgl_resource_attach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    uint32_t res_niov;
    int ret;

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    ret = virtio_gpu_create_mapping_iov(g, att_rb.nr_entries, sizeof(att_rb),
                                        cmd, NULL, &res_iovs, &res_niov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, res_niov);

    if (ret != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, res_niov);
    }
}

static void virgl_resource_detach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
}

static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}

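/*
 * Report the capset id plus maximum version/size for the n-th capset the
 * device advertises; an out-of-range index yields an all-zero response.
 */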
static void virgl_cmd_get_capset_info(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VIRTIO_GPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));

    if (info.capset_index < g->capset_ids->len) {
        resp.capset_id = g_array_index(g->capset_ids, uint32_t,
                                       info.capset_index);
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

static void virgl_cmd_get_capset(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;

    VIRTIO_GPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    resp = g_malloc0(sizeof(*resp) + max_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

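/*
 * Blob resource handling (create/map/unmap/scanout) requires the
 * virglrenderer 1.x API and is therefore compiled in conditionally.
 */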
#if VIRGL_VERSION_MAJOR >= 1
static void virgl_cmd_resource_create_blob(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virgl_renderer_resource_create_blob_args virgl_args = { 0 };
    g_autofree struct virtio_gpu_virgl_resource *res = NULL;
    struct virtio_gpu_resource_create_blob cblob;
    struct virgl_renderer_resource_info info;
    int ret;

    if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    /* do not assign an existing, list-owned resource to the g_autofree res */
    if (virtio_gpu_virgl_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_virgl_resource, 1);
    res->base.resource_id = cblob.resource_id;
    res->base.blob_size = cblob.size;
    res->base.dmabuf_fd = -1;

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
        ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                            cmd, &res->base.addrs,
                                            &res->base.iov, &res->base.iov_cnt);
        if (ret) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
    }

    virgl_args.res_handle = cblob.resource_id;
    virgl_args.ctx_id = cblob.hdr.ctx_id;
    virgl_args.blob_mem = cblob.blob_mem;
    virgl_args.blob_id = cblob.blob_id;
    virgl_args.blob_flags = cblob.blob_flags;
    virgl_args.size = cblob.size;
    virgl_args.iovecs = res->base.iov;
    virgl_args.num_iovs = res->base.iov_cnt;

    ret = virgl_renderer_resource_create_blob(&virgl_args);
    if (ret) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n",
                      __func__, strerror(-ret));
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        virtio_gpu_cleanup_mapping(g, &res->base);
        return;
    }

    ret = virgl_renderer_resource_get_info(cblob.resource_id, &info);
    if (ret) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource does not have info %d: %s\n",
                      __func__, cblob.resource_id, strerror(-ret));
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        virtio_gpu_cleanup_mapping(g, &res->base);
        virgl_renderer_resource_unref(cblob.resource_id);
        return;
    }

    res->base.dmabuf_fd = info.fd;

    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
    res = NULL;
}

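/*
 * Map a blob resource into the hostmem region at the guest-requested offset
 * and return the map_info flags reported by virglrenderer to the guest.
 */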
static void virgl_cmd_resource_map_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_map_blob mblob;
    struct virtio_gpu_virgl_resource *res;
    struct virtio_gpu_resp_map_info resp;
    int ret;

    VIRTIO_GPU_FILL_CMD(mblob);
    virtio_gpu_map_blob_bswap(&mblob);

    res = virtio_gpu_virgl_find_resource(g, mblob.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, mblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_virgl_map_resource_blob(g, res, mblob.offset);
    if (ret) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    memset(&resp, 0, sizeof(resp));
    resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO;
    virgl_renderer_resource_get_map_info(mblob.resource_id, &resp.map_info);
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

static void virgl_cmd_resource_unmap_blob(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd,
                                          bool *cmd_suspended)
{
    struct virtio_gpu_resource_unmap_blob ublob;
    struct virtio_gpu_virgl_resource *res;
    int ret;

    VIRTIO_GPU_FILL_CMD(ublob);
    virtio_gpu_unmap_blob_bswap(&ublob);

    res = virtio_gpu_virgl_find_resource(g, ublob.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, ublob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended);
    if (ret) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void virgl_cmd_set_scanout_blob(VirtIOGPU *g,
                                       struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_virgl_resource *res;
    struct virtio_gpu_set_scanout_blob ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    if (ss.width < 16 ||
        ss.height < 16 ||
        ss.r.x + ss.r.width > ss.width ||
        ss.r.y + ss.r.height > ss.height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id,
                      ss.r.x, ss.r.y, ss.r.width, ss.r.height,
                      ss.width, ss.height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res = virtio_gpu_virgl_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    if (res->base.dmabuf_fd < 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource not backed by dmabuf %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    if (!virtio_gpu_scanout_blob_to_fb(&fb, &ss, res->base.blob_size)) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;
    if (virtio_gpu_update_dmabuf(g, ss.scanout_id, &res->base, &fb, &ss.r)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to update dmabuf\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_update_scanout(g, ss.scanout_id, &res->base, &fb, &ss.r);
}
#endif

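/*
 * Main control-queue dispatcher for the virgl backend. Commands that were
 * suspended (asynchronous blob unmap) or already finished do not get a
 * response here; fenced commands are answered later from virgl_write_fence().
 */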
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    bool cmd_suspended = false;

    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd, &cmd_suspended);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
#if VIRGL_VERSION_MAJOR >= 1
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        virgl_cmd_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB:
        virgl_cmd_resource_map_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB:
        virgl_cmd_resource_unmap_blob(g, cmd, &cmd_suspended);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        virgl_cmd_set_scanout_blob(g, cmd);
        break;
#endif
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd_suspended || cmd->finished) {
        return;
    }
    if (cmd->error) {
        fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
                cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }
    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}

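/*
 * Fence completion callback invoked by virglrenderer: retire every queued
 * fenced command whose fence id is not newer than the signalled fence.
 */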
static void virgl_write_fence(void *opaque, uint32_t fence)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            trace_virtio_gpu_dec_inflight_fences(g->inflight);
        }
    }
}

static virgl_renderer_gl_context
virgl_create_context(void *opaque, int scanout_idx,
                     struct virgl_renderer_gl_ctx_param *params)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext ctx;
    QEMUGLParams qparams;

    qparams.major_ver = params->major_ver;
    qparams.minor_ver = params->minor_ver;

    ctx = dpy_gl_ctx_create(g->parent_obj.scanout[scanout_idx].con, &qparams);
    return (virgl_renderer_gl_context)ctx;
}

static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    dpy_gl_ctx_destroy(g->parent_obj.scanout[0].con, qctx);
}

static int virgl_make_context_current(void *opaque, int scanout_idx,
                                      virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    return dpy_gl_ctx_make_current(g->parent_obj.scanout[scanout_idx].con,
                                   qctx);
}

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
    .version = 1,
    .write_fence = virgl_write_fence,
    .create_gl_context = virgl_create_context,
    .destroy_gl_context = virgl_destroy_context,
    .make_current = virgl_make_context_current,
};

static void virtio_gpu_print_stats(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);

    if (g->stats.requests) {
        fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
                g->stats.requests,
                g->stats.max_inflight,
                g->stats.req_3d,
                g->stats.bytes_3d);
        g->stats.requests = 0;
        g->stats.max_inflight = 0;
        g->stats.req_3d = 0;
        g->stats.bytes_3d = 0;
    } else {
        fprintf(stderr, "stats: idle\r");
    }
    timer_mod(gl->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}

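/*
 * Poll virglrenderer for completed fences and keep re-arming the timer while
 * there is still work queued on the command or fence queues.
 */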
static void virtio_gpu_fence_poll(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);

    virgl_renderer_poll();
    virtio_gpu_process_cmdq(g);
    if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
        timer_mod(gl->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }
}

void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
{
    virtio_gpu_fence_poll(g);
}

void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g)
{
    int i;

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[i].con);
    }
}

void virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    virgl_renderer_reset();
}

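/*
 * Initialise virglrenderer with the callbacks above, selecting optional
 * features (EGL display sharing, D3D11 texture sharing, Venus) based on the
 * build-time API and the device configuration, and set up the fence-poll
 * timer, the optional stats timer and the command-queue resume bottom half.
 */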
int virtio_gpu_virgl_init(VirtIOGPU *g)
{
    int ret;
    uint32_t flags = 0;
    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);

#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
    if (qemu_egl_display) {
        virtio_gpu_3d_cbs.version = 4;
        virtio_gpu_3d_cbs.get_egl_display = virgl_get_egl_display;
    }
#endif
#ifdef VIRGL_RENDERER_D3D11_SHARE_TEXTURE
    if (qemu_egl_angle_d3d) {
        flags |= VIRGL_RENDERER_D3D11_SHARE_TEXTURE;
    }
#endif
#if VIRGL_VERSION_MAJOR >= 1
    if (virtio_gpu_venus_enabled(g->parent_obj.conf)) {
        flags |= VIRGL_RENDERER_VENUS | VIRGL_RENDERER_RENDER_SERVER;
    }
#endif

    ret = virgl_renderer_init(g, flags, &virtio_gpu_3d_cbs);
    if (ret != 0) {
        error_report("virgl could not be initialized: %d", ret);
        return ret;
    }

    gl->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                  virtio_gpu_fence_poll, g);

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        gl->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                       virtio_gpu_print_stats, g);
        timer_mod(gl->print_stats,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
    }

#if VIRGL_VERSION_MAJOR >= 1
    gl->cmdq_resume_bh = aio_bh_new(qemu_get_aio_context(),
                                    virtio_gpu_virgl_resume_cmdq_bh,
                                    g);
#endif

    return 0;
}

static void virtio_gpu_virgl_add_capset(GArray *capset_ids, uint32_t capset_id)
{
    g_array_append_val(capset_ids, capset_id);
}

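/*
 * Build the list of capability sets to advertise: VIRGL always, VIRGL2 when
 * virglrenderer reports a non-zero version for it, and VENUS when enabled in
 * the configuration and reported with a non-zero size.
 */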
GArray *virtio_gpu_virgl_get_capsets(VirtIOGPU *g)
{
    uint32_t capset_max_ver, capset_max_size;
    GArray *capset_ids;

    capset_ids = g_array_new(false, false, sizeof(uint32_t));

    /* VIRGL is always supported. */
    virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VIRGL);

    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset_max_ver,
                               &capset_max_size);
    if (capset_max_ver) {
        virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VIRGL2);
    }

    if (virtio_gpu_venus_enabled(g->parent_obj.conf)) {
        virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VENUS,
                                   &capset_max_ver,
                                   &capset_max_size);
        if (capset_max_size) {
            virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VENUS);
        }
    }

    return capset_ids;
}