/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"

#include "ui/egl-helpers.h"

#include <virglrenderer.h>

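/*
 * Wrapper around the common simple resource: virgl-backed resources
 * additionally track the MemoryRegion used while a host blob is mapped
 * into the guest-visible hostmem window (NULL while unmapped).
 */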
struct virtio_gpu_virgl_resource {
    struct virtio_gpu_simple_resource base;
    MemoryRegion *mr;
};

static struct virtio_gpu_virgl_resource *
virtio_gpu_virgl_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return NULL;
    }

    return container_of(res, struct virtio_gpu_virgl_resource, base);
}

#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
static void *
virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
{
    return qemu_egl_display;
}
#endif

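/*
 * Blob-resource mapping support (virglrenderer >= 1.0). A mapped blob is
 * exposed to the guest as a RAM MemoryRegion inside the device's hostmem
 * window; teardown is asynchronous because the last reference to the MR
 * may be dropped from an RCU callback, see
 * virtio_gpu_virgl_hostmem_region_free() below.
 */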
#if VIRGL_VERSION_MAJOR >= 1
struct virtio_gpu_virgl_hostmem_region {
    MemoryRegion mr;
    struct VirtIOGPU *g;
    bool finish_unmapping;
};

static struct virtio_gpu_virgl_hostmem_region *
to_hostmem_region(MemoryRegion *mr)
{
    return container_of(mr, struct virtio_gpu_virgl_hostmem_region, mr);
}

static void virtio_gpu_virgl_resume_cmdq_bh(void *opaque)
{
    VirtIOGPU *g = opaque;

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_virgl_hostmem_region_free(void *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    struct virtio_gpu_virgl_hostmem_region *vmr;
    VirtIOGPUBase *b;
    VirtIOGPUGL *gl;

    vmr = to_hostmem_region(mr);
    vmr->finish_unmapping = true;

    b = VIRTIO_GPU_BASE(vmr->g);
    b->renderer_blocked--;

    /*
     * memory_region_unref() is executed from RCU thread context, while
     * virglrenderer works only on the main-loop thread that holds the GL
     * context, so command processing is resumed via a bottom half.
     */
    gl = VIRTIO_GPU_GL(vmr->g);
    qemu_bh_schedule(gl->cmdq_resume_bh);
}

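/*
 * Map a host blob resource into the hostmem window at the guest-requested
 * offset. Returns 0 on success or a negative errno value on failure.
 */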
static int
virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g,
                                   struct virtio_gpu_virgl_resource *res,
                                   uint64_t offset)
{
    struct virtio_gpu_virgl_hostmem_region *vmr;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    MemoryRegion *mr;
    uint64_t size;
    void *data;
    int ret;

    if (!virtio_gpu_hostmem_enabled(b->conf)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: hostmem disabled\n", __func__);
        return -EOPNOTSUPP;
    }

    ret = virgl_renderer_resource_map(res->base.resource_id, &data, &size);
    if (ret) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map virgl resource: %s\n",
                      __func__, strerror(-ret));
        return ret;
    }

    vmr = g_new0(struct virtio_gpu_virgl_hostmem_region, 1);
    vmr->g = g;

    mr = &vmr->mr;
    memory_region_init_ram_ptr(mr, OBJECT(mr), "blob", size, data);
    memory_region_add_subregion(&b->hostmem, offset, mr);
    memory_region_set_enabled(mr, true);

    /*
     * The MR could outlive the resource if the MR's reference is held
     * outside of virtio-gpu. To prevent the resource from being unmapped
     * while the MR is alive, which would invalidate the data pointer,
     * block virtio-gpu command processing until the MR is fully
     * unreferenced and freed.
     */
    OBJECT(mr)->free = virtio_gpu_virgl_hostmem_region_free;

    res->mr = mr;

    return 0;
}

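/*
 * Unmap a previously mapped blob resource. On the first call this only
 * begins the teardown and sets *cmd_suspended, so the caller must retry
 * the command once the MR has been freed; the retried call then finishes
 * the unmap. Returns 0 on success or a negative errno value on failure.
 */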
static int
virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g,
                                     struct virtio_gpu_virgl_resource *res,
                                     bool *cmd_suspended)
{
    struct virtio_gpu_virgl_hostmem_region *vmr;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    MemoryRegion *mr = res->mr;
    int ret;

    if (!mr) {
        return 0;
    }

    vmr = to_hostmem_region(res->mr);

    /*
     * Perform the unmapping asynchronously, in 3 steps:
     *
     * 1. Begin the async unmapping with memory_region_del_subregion()
     *    and suspend/block command processing.
     * 2. Wait for res->mr to be freed and for command processing to be
     *    resumed asynchronously by virtio_gpu_virgl_hostmem_region_free().
     * 3. Finish the unmapping with the final virgl_renderer_resource_unmap().
     */
    if (vmr->finish_unmapping) {
        res->mr = NULL;
        g_free(vmr);

        ret = virgl_renderer_resource_unmap(res->base.resource_id);
        if (ret) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: failed to unmap virgl resource: %s\n",
                          __func__, strerror(-ret));
            return ret;
        }
    } else {
        *cmd_suspended = true;

        /* the renderer will be unblocked once the MR is freed */
        b->renderer_blocked++;

        /* the memory region owns the vmr object and frees it on finalize */
        memory_region_set_enabled(mr, false);
        memory_region_del_subregion(&b->hostmem, mr);
        object_unparent(OBJECT(mr));
    }

    return 0;
}
#endif

static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;
    struct virtio_gpu_virgl_resource *res;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_virgl_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_virgl_resource, 1);
    res->base.width = c2d.width;
    res->base.height = c2d.height;
    res->base.format = c2d.format;
    res->base.resource_id = c2d.resource_id;
    res->base.dmabuf_fd = -1;
    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);

    args.handle = c2d.resource_id;
    args.target = 2;           /* PIPE_TEXTURE_2D */
    args.format = c2d.format;
    args.bind = (1 << 1);      /* VIRGL_BIND_RENDER_TARGET */
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;
    struct virtio_gpu_virgl_resource *res;

    VIRTIO_GPU_FILL_CMD(c3d);
    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    if (c3d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_virgl_find_resource(g, c3d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c3d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_virgl_resource, 1);
    res->base.width = c3d.width;
    res->base.height = c3d.height;
    res->base.format = c3d.format;
    res->base.resource_id = c3d.resource_id;
    res->base.dmabuf_fd = -1;
    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     bool *cmd_suspended)
{
    struct virtio_gpu_resource_unref unref;
    struct virtio_gpu_virgl_resource *res;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_virgl_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

#if VIRGL_VERSION_MAJOR >= 1
    if (virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended)) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
    if (*cmd_suspended) {
        return;
    }
#endif

    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(unref.resource_id);

    QTAILQ_REMOVE(&g->reslist, &res->base, next);

    g_free(res);
}

static void virgl_cmd_context_create(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    if (cc.context_init) {
        if (!virtio_gpu_context_init_enabled(g->parent_obj.conf)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: context_init disabled\n",
                          __func__);
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }

#if VIRGL_VERSION_MAJOR >= 1
        virgl_renderer_context_create_with_flags(cc.hdr.ctx_id,
                                                 cc.context_init,
                                                 cc.nlen,
                                                 cc.debug_name);
        return;
#endif
    }

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen, cc.debug_name);
}

static void virgl_cmd_context_destroy(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
                                   int width, int height)
{
    if (!g->parent_obj.scanout[idx].con) {
        return;
    }

    dpy_gl_update(g->parent_obj.scanout[idx].con, x, y, width, height);
}

static void virgl_cmd_resource_flush(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        if (g->parent_obj.scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    }
}

static void virgl_cmd_set_scanout(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    int ret;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    g->parent_obj.enable = 1;

    if (ss.resource_id && ss.r.width && ss.r.height) {
        struct virgl_renderer_resource_info info;
        void *d3d_tex2d = NULL;

#if VIRGL_VERSION_MAJOR >= 1
        struct virgl_renderer_resource_info_ext ext;
        memset(&ext, 0, sizeof(ext));
        ret = virgl_renderer_resource_get_info_ext(ss.resource_id, &ext);
        info = ext.base;
        d3d_tex2d = ext.d3d_tex2d;
#else
        memset(&info, 0, sizeof(info));
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
#endif
        if (ret) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal resource specified %d\n",
                          __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        qemu_console_resize(g->parent_obj.scanout[ss.scanout_id].con,
                            ss.r.width, ss.r.height);
        virgl_renderer_force_ctx_0();
        dpy_gl_scanout_texture(
            g->parent_obj.scanout[ss.scanout_id].con, info.tex_id,
            info.flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP,
            info.width, info.height,
            ss.r.x, ss.r.y, ss.r.width, ss.r.height,
            d3d_tex2d);
    } else {
        dpy_gfx_replace_surface(
            g->parent_obj.scanout[ss.scanout_id].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[ss.scanout_id].con);
    }
    g->parent_obj.scanout[ss.scanout_id].resource_id = ss.resource_id;
}

static void virgl_cmd_submit_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)\n",
                      __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        g->stats.req_3d++;
        g->stats.bytes_3d += cs.size;
    }

    /* cs.size is in bytes; virglrenderer expects the dword count */
    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}

static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    /* 2D transfers use context 0, mip level 0 and default strides */
    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VIRTIO_GPU_FILL_CMD(tf3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}

static void virgl_resource_attach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    uint32_t res_niov;
    int ret;

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    ret = virtio_gpu_create_mapping_iov(g, att_rb.nr_entries, sizeof(att_rb),
                                        cmd, NULL, &res_iovs, &res_niov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, res_niov);

    if (ret != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, res_niov);
    }
}

static void virgl_resource_detach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
}

static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}

static void virgl_cmd_get_capset_info(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VIRTIO_GPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));

    if (info.capset_index < g->capset_ids->len) {
        resp.capset_id = g_array_index(g->capset_ids, uint32_t,
                                       info.capset_index);
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

static void virgl_cmd_get_capset(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;

    VIRTIO_GPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    resp = g_malloc0(sizeof(*resp) + max_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

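/*
 * Blob resource commands (virglrenderer >= 1.0). A blob resource is
 * created from guest memory pages and/or host renderer storage and can
 * later be mapped into the hostmem window or scanned out as a dmabuf.
 */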
#if VIRGL_VERSION_MAJOR >= 1
static void virgl_cmd_resource_create_blob(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virgl_renderer_resource_create_blob_args virgl_args = { 0 };
    g_autofree struct virtio_gpu_virgl_resource *res = NULL;
    struct virtio_gpu_resource_create_blob cblob;
    struct virgl_renderer_resource_info info;
    int ret;

    if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_virgl_find_resource(g, cblob.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_virgl_resource, 1);
    res->base.resource_id = cblob.resource_id;
    res->base.blob_size = cblob.size;
    res->base.dmabuf_fd = -1;

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
        ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                            cmd, &res->base.addrs,
                                            &res->base.iov, &res->base.iov_cnt);
        /* virtio_gpu_create_mapping_iov() returns 0 on success */
        if (ret != 0) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
    }

    virgl_args.res_handle = cblob.resource_id;
    virgl_args.ctx_id = cblob.hdr.ctx_id;
    virgl_args.blob_mem = cblob.blob_mem;
    virgl_args.blob_id = cblob.blob_id;
    virgl_args.blob_flags = cblob.blob_flags;
    virgl_args.size = cblob.size;
    virgl_args.iovecs = res->base.iov;
    virgl_args.num_iovs = res->base.iov_cnt;

    ret = virgl_renderer_resource_create_blob(&virgl_args);
    if (ret) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n",
                      __func__, strerror(-ret));
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        virtio_gpu_cleanup_mapping(g, &res->base);
        return;
    }

    ret = virgl_renderer_resource_get_info(cblob.resource_id, &info);
    if (ret) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource does not have info %d: %s\n",
                      __func__, cblob.resource_id, strerror(-ret));
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        virtio_gpu_cleanup_mapping(g, &res->base);
        virgl_renderer_resource_unref(cblob.resource_id);
        return;
    }

    res->base.dmabuf_fd = info.fd;

    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
    /* ownership moved to the reslist; cancel the g_autofree cleanup */
    res = NULL;
}

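/*
 * Handle VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB: map the blob into the hostmem
 * window and reply with the renderer's map info.
 */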
static void virgl_cmd_resource_map_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_map_blob mblob;
    struct virtio_gpu_virgl_resource *res;
    struct virtio_gpu_resp_map_info resp;
    int ret;

    VIRTIO_GPU_FILL_CMD(mblob);
    virtio_gpu_map_blob_bswap(&mblob);

    res = virtio_gpu_virgl_find_resource(g, mblob.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, mblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_virgl_map_resource_blob(g, res, mblob.offset);
    if (ret) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    memset(&resp, 0, sizeof(resp));
    resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO;
    virgl_renderer_resource_get_map_info(mblob.resource_id, &resp.map_info);
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

static void virgl_cmd_resource_unmap_blob(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd,
                                          bool *cmd_suspended)
{
    struct virtio_gpu_resource_unmap_blob ublob;
    struct virtio_gpu_virgl_resource *res;
    int ret;

    VIRTIO_GPU_FILL_CMD(ublob);
    virtio_gpu_unmap_blob_bswap(&ublob);

    res = virtio_gpu_virgl_find_resource(g, ublob.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, ublob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended);
    if (ret) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

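/*
 * Handle VIRTIO_GPU_CMD_SET_SCANOUT_BLOB: scan out a dmabuf-backed blob.
 * The framebuffer offset of the scanout rectangle's first pixel is
 *
 *     offset = offsets[0] + r.x * bytes_pp + r.y * stride
 *
 * and the bounds check below verifies that the last scanline of the
 * rectangle still fits within the blob.
 */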
static void virgl_cmd_set_scanout_blob(VirtIOGPU *g,
                                       struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_virgl_resource *res;
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    if (ss.width < 16 ||
        ss.height < 16 ||
        ss.r.x + ss.r.width > ss.width ||
        ss.r.y + ss.r.height > ss.height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id,
                      ss.r.x, ss.r.y, ss.r.width, ss.r.height,
                      ss.width, ss.height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res = virtio_gpu_virgl_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    if (res->base.dmabuf_fd < 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource not backed by dmabuf %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: pixel format not supported %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->base.blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;
    if (virtio_gpu_update_dmabuf(g, ss.scanout_id, &res->base, &fb, &ss.r)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to update dmabuf\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_update_scanout(g, ss.scanout_id, &res->base, &fb, &ss.r);
}
#endif

void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    bool cmd_suspended = false;

    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd, &cmd_suspended);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
#if VIRGL_VERSION_MAJOR >= 1
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        virgl_cmd_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB:
        virgl_cmd_resource_map_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB:
        virgl_cmd_resource_unmap_blob(g, cmd, &cmd_suspended);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        virgl_cmd_set_scanout_blob(g, cmd);
        break;
#endif
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd_suspended || cmd->finished) {
        return;
    }
    if (cmd->error) {
        fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
                cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }
    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}

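/*
 * virglrenderer fence-completion callback: retire every queued command
 * whose fence_id is <= the signalled fence and send its response.
 */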
static void virgl_write_fence(void *opaque, uint32_t fence)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * The guest can end up emitting fences out of order, so check all
         * fenced commands, not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            trace_virtio_gpu_dec_inflight_fences(g->inflight);
        }
    }
}

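/*
 * GL context callbacks handed to virglrenderer: contexts are created on
 * the console backing the given scanout and are QEMU display contexts
 * cast to virglrenderer's opaque handle type.
 */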
static virgl_renderer_gl_context
virgl_create_context(void *opaque, int scanout_idx,
                     struct virgl_renderer_gl_ctx_param *params)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext ctx;
    QEMUGLParams qparams;

    qparams.major_ver = params->major_ver;
    qparams.minor_ver = params->minor_ver;

    ctx = dpy_gl_ctx_create(g->parent_obj.scanout[scanout_idx].con, &qparams);
    return (virgl_renderer_gl_context)ctx;
}

static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    /* no scanout index is passed here, so use scanout 0's console */
    dpy_gl_ctx_destroy(g->parent_obj.scanout[0].con, qctx);
}

static int virgl_make_context_current(void *opaque, int scanout_idx,
                                      virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    return dpy_gl_ctx_make_current(g->parent_obj.scanout[scanout_idx].con,
                                   qctx);
}

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
    .version             = 1,
    .write_fence         = virgl_write_fence,
    .create_gl_context   = virgl_create_context,
    .destroy_gl_context  = virgl_destroy_context,
    .make_current        = virgl_make_context_current,
};

static void virtio_gpu_print_stats(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);

    if (g->stats.requests) {
        fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
                g->stats.requests,
                g->stats.max_inflight,
                g->stats.req_3d,
                g->stats.bytes_3d);
        g->stats.requests     = 0;
        g->stats.max_inflight = 0;
        g->stats.req_3d       = 0;
        g->stats.bytes_3d     = 0;
    } else {
        fprintf(stderr, "stats: idle\r");
    }
    timer_mod(gl->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}

static void virtio_gpu_fence_poll(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);

    virgl_renderer_poll();
    virtio_gpu_process_cmdq(g);
    /* re-arm the timer every 10 ms while commands or fences are pending */
    if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
        timer_mod(gl->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }
}

void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
{
    virtio_gpu_fence_poll(g);
}

void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g)
{
    int i;

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[i].con);
    }
}

void virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    virgl_renderer_reset();
}

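/*
 * Initialize virglrenderer along with the device's timers and bottom
 * halves. The callback version and renderer flags are chosen at runtime
 * from the build-time virglrenderer capabilities and the device
 * configuration.
 */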
int virtio_gpu_virgl_init(VirtIOGPU *g)
{
    int ret;
    uint32_t flags = 0;
    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);

#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
    if (qemu_egl_display) {
        virtio_gpu_3d_cbs.version = 4;
        virtio_gpu_3d_cbs.get_egl_display = virgl_get_egl_display;
    }
#endif
#ifdef VIRGL_RENDERER_D3D11_SHARE_TEXTURE
    if (qemu_egl_angle_d3d) {
        flags |= VIRGL_RENDERER_D3D11_SHARE_TEXTURE;
    }
#endif
#if VIRGL_VERSION_MAJOR >= 1
    if (virtio_gpu_venus_enabled(g->parent_obj.conf)) {
        flags |= VIRGL_RENDERER_VENUS | VIRGL_RENDERER_RENDER_SERVER;
    }
#endif

    ret = virgl_renderer_init(g, flags, &virtio_gpu_3d_cbs);
    if (ret != 0) {
        error_report("virgl could not be initialized: %d", ret);
        return ret;
    }

    gl->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                  virtio_gpu_fence_poll, g);

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        gl->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                       virtio_gpu_print_stats, g);
        timer_mod(gl->print_stats,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
    }

#if VIRGL_VERSION_MAJOR >= 1
    gl->cmdq_resume_bh = aio_bh_new(qemu_get_aio_context(),
                                    virtio_gpu_virgl_resume_cmdq_bh,
                                    g);
#endif

    return 0;
}

static void virtio_gpu_virgl_add_capset(GArray *capset_ids, uint32_t capset_id)
{
    g_array_append_val(capset_ids, capset_id);
}

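/*
 * Build the list of capability sets to advertise. Presence is probed via
 * virgl_renderer_get_cap_set(): VIRGL2 reports a non-zero max version
 * when supported, while VENUS reports a non-zero max size.
 */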
GArray *virtio_gpu_virgl_get_capsets(VirtIOGPU *g)
{
    uint32_t capset_max_ver, capset_max_size;
    GArray *capset_ids;

    capset_ids = g_array_new(false, false, sizeof(uint32_t));

    /* VIRGL is always supported. */
    virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VIRGL);

    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset_max_ver,
                               &capset_max_size);
    if (capset_max_ver) {
        virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VIRGL2);
    }

    if (virtio_gpu_venus_enabled(g->parent_obj.conf)) {
        virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VENUS,
                                   &capset_max_ver,
                                   &capset_max_size);
        if (capset_max_size) {
            virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VENUS);
        }
    }

    return capset_ids;
}