/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"

#include "ui/egl-helpers.h"

#include <virglrenderer.h>

struct virtio_gpu_virgl_resource {
    struct virtio_gpu_simple_resource base;
    MemoryRegion *mr;
};

static struct virtio_gpu_virgl_resource *
virtio_gpu_virgl_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return NULL;
    }

    return container_of(res, struct virtio_gpu_virgl_resource, base);
}

#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
static void *
virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
{
    return qemu_egl_display;
}
#endif

#if VIRGL_VERSION_MAJOR >= 1
struct virtio_gpu_virgl_hostmem_region {
    MemoryRegion mr;
    struct VirtIOGPU *g;
    bool finish_unmapping;
};
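
/*
 * The region embeds its MemoryRegion so that the custom ->free handler
 * below can recover the wrapper with container_of() once the MR's last
 * reference is dropped. Rough lifecycle sketch (informal, matching the
 * code below):
 *
 *   map:   virgl_renderer_resource_map() -> RAM MR init -> add subregion
 *   unmap: del subregion + object_unref() -> (async, via RCU) ->free sets
 *          finish_unmapping and schedules the cmdq resume BH -> the
 *          resubmitted UNMAP_BLOB calls virgl_renderer_resource_unmap()
 */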

static struct virtio_gpu_virgl_hostmem_region *
to_hostmem_region(MemoryRegion *mr)
{
    return container_of(mr, struct virtio_gpu_virgl_hostmem_region, mr);
}

static void virtio_gpu_virgl_resume_cmdq_bh(void *opaque)
{
    VirtIOGPU *g = opaque;

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_virgl_hostmem_region_free(void *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    struct virtio_gpu_virgl_hostmem_region *vmr;
    VirtIOGPUBase *b;
    VirtIOGPUGL *gl;

    vmr = to_hostmem_region(mr);
    vmr->finish_unmapping = true;

    b = VIRTIO_GPU_BASE(vmr->g);
    b->renderer_blocked--;

    /*
     * memory_region_unref() runs in RCU thread context, while
     * virglrenderer may only be used from the main-loop thread that
     * holds the GL context, so resume command processing via a BH.
     */
    gl = VIRTIO_GPU_GL(vmr->g);
    qemu_bh_schedule(gl->cmdq_resume_bh);
}

static int
virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g,
                                   struct virtio_gpu_virgl_resource *res,
                                   uint64_t offset)
{
    struct virtio_gpu_virgl_hostmem_region *vmr;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    MemoryRegion *mr;
    uint64_t size;
    void *data;
    int ret;

    if (!virtio_gpu_hostmem_enabled(b->conf)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: hostmem disabled\n", __func__);
        return -EOPNOTSUPP;
    }

    ret = virgl_renderer_resource_map(res->base.resource_id, &data, &size);
    if (ret) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map virgl resource: %s\n",
                      __func__, strerror(-ret));
        return ret;
    }

    vmr = g_new0(struct virtio_gpu_virgl_hostmem_region, 1);
    vmr->g = g;

    mr = &vmr->mr;
    memory_region_init_ram_ptr(mr, OBJECT(mr), NULL, size, data);
    memory_region_add_subregion(&b->hostmem, offset, mr);
    memory_region_set_enabled(mr, true);

    /*
     * The MR can outlive the resource if the MR's reference is held
     * outside of virtio-gpu. To avoid unmapping the resource while the MR
     * is still alive (which would leave the MR's data pointer dangling),
     * block virtio-gpu command processing until the MR is fully
     * unreferenced and freed.
     */
    OBJECT(mr)->free = virtio_gpu_virgl_hostmem_region_free;

    res->mr = mr;

    trace_virtio_gpu_cmd_res_map_blob(res->base.resource_id, vmr, mr);

    return 0;
}

static int
virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g,
                                     struct virtio_gpu_virgl_resource *res,
                                     bool *cmd_suspended)
{
    struct virtio_gpu_virgl_hostmem_region *vmr;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    MemoryRegion *mr = res->mr;
    int ret;

    if (!mr) {
        return 0;
    }

    vmr = to_hostmem_region(res->mr);

    trace_virtio_gpu_cmd_res_unmap_blob(res->base.resource_id, mr,
                                        vmr->finish_unmapping);

    /*
     * Perform async unmapping in 3 steps:
     *
     * 1. Begin async unmapping with memory_region_del_subregion()
     *    and suspend/block cmd processing.
     * 2. Wait for res->mr to be freed and cmd processing resumed
     *    asynchronously by virtio_gpu_virgl_hostmem_region_free().
     * 3. Finish the unmapping with final virgl_renderer_resource_unmap().
     */
    if (vmr->finish_unmapping) {
        res->mr = NULL;
        g_free(vmr);

        ret = virgl_renderer_resource_unmap(res->base.resource_id);
        if (ret) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: failed to unmap virgl resource: %s\n",
                          __func__, strerror(-ret));
            return ret;
        }
    } else {
        *cmd_suspended = true;

        /* the renderer will be unblocked once the MR is freed */
        b->renderer_blocked++;

        /* the MR is its own owner and frees itself once unreferenced */
        memory_region_set_enabled(mr, false);
        memory_region_del_subregion(&b->hostmem, mr);
        object_unref(OBJECT(mr));
    }

    return 0;
}
#endif
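
/*
 * The map/unmap helpers above back the guest-visible blob flow
 * (an illustrative sketch; the command handlers appear further down
 * in this file):
 *
 *   VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB -> virgl_cmd_resource_create_blob()
 *   VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB    -> virgl_cmd_resource_map_blob()
 *   ... guest accesses the mapping through the hostmem region ...
 *   VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB  -> virgl_cmd_resource_unmap_blob()
 */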

static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;
    struct virtio_gpu_virgl_resource *res;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_virgl_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_virgl_resource, 1);
    res->base.width = c2d.width;
    res->base.height = c2d.height;
    res->base.format = c2d.format;
    res->base.resource_id = c2d.resource_id;
    res->base.dmabuf_fd = -1;
    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);

    args.handle = c2d.resource_id;
    args.target = 2;           /* PIPE_TEXTURE_2D */
    args.format = c2d.format;
    args.bind = (1 << 1);      /* VIRGL_BIND_RENDER_TARGET */
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;
    struct virtio_gpu_virgl_resource *res;

    VIRTIO_GPU_FILL_CMD(c3d);
    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    if (c3d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_virgl_find_resource(g, c3d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c3d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_virgl_resource, 1);
    res->base.width = c3d.width;
    res->base.height = c3d.height;
    res->base.format = c3d.format;
    res->base.resource_id = c3d.resource_id;
    res->base.dmabuf_fd = -1;
    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     bool *cmd_suspended)
{
    struct virtio_gpu_resource_unref unref;
    struct virtio_gpu_virgl_resource *res;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_virgl_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

#if VIRGL_VERSION_MAJOR >= 1
    if (virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended)) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
    if (*cmd_suspended) {
        return;
    }
#endif

    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(unref.resource_id);

    QTAILQ_REMOVE(&g->reslist, &res->base, next);

    g_free(res);
}

static void virgl_cmd_context_create(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    if (cc.context_init) {
        if (!virtio_gpu_context_init_enabled(g->parent_obj.conf)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: context_init disabled\n",
                          __func__);
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }

#if VIRGL_VERSION_MAJOR >= 1
        virgl_renderer_context_create_with_flags(cc.hdr.ctx_id,
                                                 cc.context_init,
                                                 cc.nlen,
                                                 cc.debug_name);
        return;
#endif
    }

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen, cc.debug_name);
}

static void virgl_cmd_context_destroy(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
                                   int width, int height)
{
    if (!g->parent_obj.scanout[idx].con) {
        return;
    }

    dpy_gl_update(g->parent_obj.scanout[idx].con, x, y, width, height);
}

static void virgl_cmd_resource_flush(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        if (g->parent_obj.scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    }
}

static void virgl_cmd_set_scanout(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    int ret;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    g->parent_obj.enable = 1;

    if (ss.resource_id && ss.r.width && ss.r.height) {
        struct virgl_renderer_resource_info info;
        void *d3d_tex2d = NULL;

#if VIRGL_VERSION_MAJOR >= 1
        struct virgl_renderer_resource_info_ext ext;
        memset(&ext, 0, sizeof(ext));
        ret = virgl_renderer_resource_get_info_ext(ss.resource_id, &ext);
        info = ext.base;
        d3d_tex2d = ext.d3d_tex2d;
#else
        memset(&info, 0, sizeof(info));
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
#endif
        if (ret) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal resource specified %d\n",
                          __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        qemu_console_resize(g->parent_obj.scanout[ss.scanout_id].con,
                            ss.r.width, ss.r.height);
        virgl_renderer_force_ctx_0();
        dpy_gl_scanout_texture(
            g->parent_obj.scanout[ss.scanout_id].con, info.tex_id,
            info.flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP,
            info.width, info.height,
            ss.r.x, ss.r.y, ss.r.width, ss.r.height,
            d3d_tex2d);
    } else {
        dpy_gfx_replace_surface(
            g->parent_obj.scanout[ss.scanout_id].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[ss.scanout_id].con);
    }
    g->parent_obj.scanout[ss.scanout_id].resource_id = ss.resource_id;
}

static void virgl_cmd_submit_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)\n",
                      __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        g->stats.req_3d++;
        g->stats.bytes_3d += cs.size;
    }

    /* cs.size is in bytes; virglrenderer expects a dword count */
    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}

static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VIRTIO_GPU_FILL_CMD(tf3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}

static void virgl_resource_attach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    uint32_t res_niov;
    int ret;

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    ret = virtio_gpu_create_mapping_iov(g, att_rb.nr_entries, sizeof(att_rb),
                                        cmd, NULL, &res_iovs, &res_niov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, res_niov);

    if (ret != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, res_niov);
    }
}

static void virgl_resource_detach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
}

static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}

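/*
 * Capset queries arrive in pairs: the guest first issues GET_CAPSET_INFO
 * to learn the capset id, maximum version and maximum size for a given
 * index, then GET_CAPSET to fetch the actual capability data.
 */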
static void virgl_cmd_get_capset_info(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VIRTIO_GPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));

    if (info.capset_index < g->capset_ids->len) {
        resp.capset_id = g_array_index(g->capset_ids, uint32_t,
                                       info.capset_index);
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

static void virgl_cmd_get_capset(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;
    VIRTIO_GPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    resp = g_malloc0(sizeof(*resp) + max_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

#if VIRGL_VERSION_MAJOR >= 1
static void virgl_cmd_resource_create_blob(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virgl_renderer_resource_create_blob_args virgl_args = { 0 };
    g_autofree struct virtio_gpu_virgl_resource *res = NULL;
    struct virtio_gpu_resource_create_blob cblob;
    struct virgl_renderer_resource_info info;
    int ret;

    if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_virgl_find_resource(g, cblob.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        /* don't let g_autofree release the existing, still-listed resource */
        res = NULL;
        return;
    }

    res = g_new0(struct virtio_gpu_virgl_resource, 1);
    res->base.resource_id = cblob.resource_id;
    res->base.blob_size = cblob.size;
    res->base.dmabuf_fd = -1;

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
        ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                            cmd, &res->base.addrs,
                                            &res->base.iov, &res->base.iov_cnt);
        if (ret != 0) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
    }

    virgl_args.res_handle = cblob.resource_id;
    virgl_args.ctx_id = cblob.hdr.ctx_id;
    virgl_args.blob_mem = cblob.blob_mem;
    virgl_args.blob_id = cblob.blob_id;
    virgl_args.blob_flags = cblob.blob_flags;
    virgl_args.size = cblob.size;
    virgl_args.iovecs = res->base.iov;
    virgl_args.num_iovs = res->base.iov_cnt;

    ret = virgl_renderer_resource_create_blob(&virgl_args);
    if (ret) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n",
                      __func__, strerror(-ret));
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        virtio_gpu_cleanup_mapping(g, &res->base);
        return;
    }

    ret = virgl_renderer_resource_get_info(cblob.resource_id, &info);
    if (ret) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource does not have info %d: %s\n",
                      __func__, cblob.resource_id, strerror(-ret));
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        virtio_gpu_cleanup_mapping(g, &res->base);
        virgl_renderer_resource_unref(cblob.resource_id);
        return;
    }

    res->base.dmabuf_fd = info.fd;

    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
    res = NULL;
}

static void virgl_cmd_resource_map_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_map_blob mblob;
    struct virtio_gpu_virgl_resource *res;
    struct virtio_gpu_resp_map_info resp;
    int ret;

    VIRTIO_GPU_FILL_CMD(mblob);
    virtio_gpu_map_blob_bswap(&mblob);

    res = virtio_gpu_virgl_find_resource(g, mblob.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, mblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_virgl_map_resource_blob(g, res, mblob.offset);
    if (ret) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    memset(&resp, 0, sizeof(resp));
    resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO;
    virgl_renderer_resource_get_map_info(mblob.resource_id, &resp.map_info);
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

static void virgl_cmd_resource_unmap_blob(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd,
                                          bool *cmd_suspended)
{
    struct virtio_gpu_resource_unmap_blob ublob;
    struct virtio_gpu_virgl_resource *res;
    int ret;

    VIRTIO_GPU_FILL_CMD(ublob);
    virtio_gpu_unmap_blob_bswap(&ublob);

    res = virtio_gpu_virgl_find_resource(g, ublob.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, ublob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended);
    if (ret) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void virgl_cmd_set_scanout_blob(VirtIOGPU *g,
                                       struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_virgl_resource *res;
    struct virtio_gpu_set_scanout_blob ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    if (ss.width < 16 ||
        ss.height < 16 ||
        ss.r.x + ss.r.width > ss.width ||
        ss.r.y + ss.r.height > ss.height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id,
                      ss.r.x, ss.r.y, ss.r.width, ss.r.height,
                      ss.width, ss.height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res = virtio_gpu_virgl_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    if (res->base.dmabuf_fd < 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource not backed by dmabuf %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    if (!virtio_gpu_scanout_blob_to_fb(&fb, &ss, res->base.blob_size)) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;
    if (virtio_gpu_update_dmabuf(g, ss.scanout_id, &res->base, &fb, &ss.r)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to update dmabuf\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_update_scanout(g, ss.scanout_id, &res->base, &fb, &ss.r);
}
#endif

void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    bool cmd_suspended = false;

    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd, &cmd_suspended);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
#if VIRGL_VERSION_MAJOR >= 1
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        virgl_cmd_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB:
        virgl_cmd_resource_map_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB:
        virgl_cmd_resource_unmap_blob(g, cmd, &cmd_suspended);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        virgl_cmd_set_scanout_blob(g, cmd);
        break;
#endif
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd_suspended || cmd->finished) {
        return;
    }
    if (cmd->error) {
        fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
                cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }
    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
#if VIRGL_VERSION_MAJOR >= 1
    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX) {
        virgl_renderer_context_create_fence(cmd->cmd_hdr.ctx_id,
                                            VIRGL_RENDERER_FENCE_FLAG_MERGEABLE,
                                            cmd->cmd_hdr.ring_idx,
                                            cmd->cmd_hdr.fence_id);
        return;
    }
#endif
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}

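/*
 * Fence completion path: virtio_gpu_virgl_process_cmd() above creates a
 * fence for fenced commands instead of completing them right away;
 * virglrenderer invokes this callback once the corresponding GPU work is
 * done, and every queued command with fence_id <= fence is then retired.
 */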
static void virgl_write_fence(void *opaque, uint32_t fence)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * The guest can end up emitting fences out of order, so we
         * should check all fenced cmds, not just the first one.
         */
#if VIRGL_VERSION_MAJOR >= 1
        if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX) {
            continue;
        }
#endif
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            trace_virtio_gpu_dec_inflight_fences(g->inflight);
        }
    }
}

#if VIRGL_VERSION_MAJOR >= 1
static void virgl_write_context_fence(void *opaque, uint32_t ctx_id,
                                      uint32_t ring_idx, uint64_t fence_id)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX &&
            cmd->cmd_hdr.ctx_id == ctx_id && cmd->cmd_hdr.ring_idx == ring_idx &&
            cmd->cmd_hdr.fence_id <= fence_id) {
            trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
            virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
            QTAILQ_REMOVE(&g->fenceq, cmd, next);
            g_free(cmd);
            g->inflight--;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                trace_virtio_gpu_dec_inflight_fences(g->inflight);
            }
        }
    }
}
#endif

static virgl_renderer_gl_context
virgl_create_context(void *opaque, int scanout_idx,
                     struct virgl_renderer_gl_ctx_param *params)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext ctx;
    QEMUGLParams qparams;

    qparams.major_ver = params->major_ver;
    qparams.minor_ver = params->minor_ver;

    ctx = dpy_gl_ctx_create(g->parent_obj.scanout[scanout_idx].con, &qparams);
    return (virgl_renderer_gl_context)ctx;
}

static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    dpy_gl_ctx_destroy(g->parent_obj.scanout[0].con, qctx);
}

static int virgl_make_context_current(void *opaque, int scanout_idx,
                                      virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    return dpy_gl_ctx_make_current(g->parent_obj.scanout[scanout_idx].con,
                                   qctx);
}

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
#if VIRGL_VERSION_MAJOR >= 1
    .version             = 3,
#else
    .version             = 1,
#endif
    .write_fence         = virgl_write_fence,
    .create_gl_context   = virgl_create_context,
    .destroy_gl_context  = virgl_destroy_context,
    .make_current        = virgl_make_context_current,
#if VIRGL_VERSION_MAJOR >= 1
    .write_context_fence = virgl_write_context_fence,
#endif
};
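
/*
 * Informal note on callback versioning: write_context_fence requires
 * callbacks version >= 3, hence the bump under VIRGL_VERSION_MAJOR >= 1;
 * virtio_gpu_virgl_init() raises the version further to 4 at runtime when
 * an EGL display is available so virglrenderer can call get_egl_display.
 */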

static void virtio_gpu_print_stats(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);

    if (g->stats.requests) {
        fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
                g->stats.requests,
                g->stats.max_inflight,
                g->stats.req_3d,
                g->stats.bytes_3d);
        g->stats.requests     = 0;
        g->stats.max_inflight = 0;
        g->stats.req_3d       = 0;
        g->stats.bytes_3d     = 0;
    } else {
        fprintf(stderr, "stats: idle\r");
    }
    timer_mod(gl->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}

static void virtio_gpu_fence_poll(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);

    virgl_renderer_poll();
    virtio_gpu_process_cmdq(g);
    if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
        timer_mod(gl->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }
}

void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
{
    virtio_gpu_fence_poll(g);
}

void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g)
{
    int i;

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[i].con);
    }
}

void virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    virgl_renderer_reset();
}

int virtio_gpu_virgl_init(VirtIOGPU *g)
{
    int ret;
    uint32_t flags = 0;
    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);

#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
    if (qemu_egl_display) {
        virtio_gpu_3d_cbs.version = 4;
        virtio_gpu_3d_cbs.get_egl_display = virgl_get_egl_display;
    }
#endif
#ifdef VIRGL_RENDERER_D3D11_SHARE_TEXTURE
    if (qemu_egl_angle_d3d) {
        flags |= VIRGL_RENDERER_D3D11_SHARE_TEXTURE;
    }
#endif
#if VIRGL_VERSION_MAJOR >= 1
    if (virtio_gpu_venus_enabled(g->parent_obj.conf)) {
        flags |= VIRGL_RENDERER_VENUS | VIRGL_RENDERER_RENDER_SERVER;
    }
#endif

    ret = virgl_renderer_init(g, flags, &virtio_gpu_3d_cbs);
    if (ret != 0) {
        error_report("virgl could not be initialized: %d", ret);
        return ret;
    }

    gl->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                  virtio_gpu_fence_poll, g);

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        gl->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                       virtio_gpu_print_stats, g);
        timer_mod(gl->print_stats,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
    }

#if VIRGL_VERSION_MAJOR >= 1
    gl->cmdq_resume_bh = aio_bh_new(qemu_get_aio_context(),
                                    virtio_gpu_virgl_resume_cmdq_bh,
                                    g);
#endif

    return 0;
}

static void virtio_gpu_virgl_add_capset(GArray *capset_ids, uint32_t capset_id)
{
    g_array_append_val(capset_ids, capset_id);
}

GArray *virtio_gpu_virgl_get_capsets(VirtIOGPU *g)
{
    uint32_t capset_max_ver, capset_max_size;
    GArray *capset_ids;

    capset_ids = g_array_new(false, false, sizeof(uint32_t));

    /* VIRGL is always supported. */
    virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VIRGL);

    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset_max_ver,
                               &capset_max_size);
    if (capset_max_ver) {
        virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VIRGL2);
    }

    if (virtio_gpu_venus_enabled(g->parent_obj.conf)) {
        virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VENUS,
                                   &capset_max_ver,
                                   &capset_max_size);
        if (capset_max_size) {
            virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VENUS);
        }
    }

    return capset_ids;
}