/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "hw/virtio/virtio-gpu.h"
#include "migration/blocker.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/display/edid.h"
#include "trace.h"

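/*
 * Reset the base device state: disable the device and clear the
 * per-scanout geometry and display surface for every configured output.
 */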
void
virtio_gpu_base_reset(VirtIOGPUBase *g)
{
    int i;

    g->enable = 0;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
}

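/*
 * Fill a display-info response: mark each enabled output and report its
 * currently requested width and height in little-endian byte order.
 */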
void
virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
                                  struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
            dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
        }
    }
}

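/*
 * Build an EDID blob for one scanout from its requested resolution,
 * physical size and refresh rate, using the common qemu_edid helper.
 */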
void
virtio_gpu_base_generate_edid(VirtIOGPUBase *g, int scanout,
                              struct virtio_gpu_resp_edid *edid)
{
    qemu_edid_info info = {
        .width_mm = g->req_state[scanout].width_mm,
        .height_mm = g->req_state[scanout].height_mm,
        .prefx = g->req_state[scanout].width,
        .prefy = g->req_state[scanout].height,
        .refresh_rate = g->req_state[scanout].refresh_rate,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}

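/*
 * The base device does not render anything itself, so the console
 * invalidate/update/text callbacks below are intentionally empty.
 */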
static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

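/* Latch an event in the virtio config space and notify the guest. */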
static void virtio_gpu_notify_event(VirtIOGPUBase *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

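/*
 * UI callback: record the geometry requested by the display frontend for
 * one output, update the enabled-output bitmask and tell the guest that
 * the display configuration changed.
 */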
static void virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPUBase *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].refresh_rate = info->refresh_rate;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;
    g->req_state[idx].width_mm = info->width_mm;
    g->req_state[idx].height_mm = info->height_mm;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
}

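/* Invoke the subclass gl_flushed hook, if one is provided. */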
static void
virtio_gpu_gl_flushed(void *opaque)
{
    VirtIOGPUBase *g = opaque;
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_GET_CLASS(g);

    if (vgc->gl_flushed) {
        vgc->gl_flushed(g);
    }
}

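/*
 * Track nested block/unblock requests from the UI; once the last block
 * is released, let the subclass resume via the gl_flushed hook.
 */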
static void
virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPUBase *g = opaque;

    if (block) {
        g->renderer_blocked++;
    } else {
        g->renderer_blocked--;
    }
    assert(g->renderer_blocked >= 0);

    if (!block && g->renderer_blocked == 0) {
        virtio_gpu_gl_flushed(g);
    }
}

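/* Report to the console layer whether GL and dmabuf support are enabled. */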
static int
virtio_gpu_get_flags(void *opaque)
{
    VirtIOGPUBase *g = opaque;
    int flags = GRAPHIC_FLAGS_NONE;

    if (virtio_gpu_virgl_enabled(g->conf)) {
        flags |= GRAPHIC_FLAGS_GL;
    }

    if (virtio_gpu_dmabuf_enabled(g->conf)) {
        flags |= GRAPHIC_FLAGS_DMABUF;
    }

    return flags;
}

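/* Console callbacks registered for each scanout in realize below. */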
static const GraphicHwOps virtio_gpu_ops = {
    .get_flags = virtio_gpu_get_flags,
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};

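/*
 * Common realize for the virtio-gpu variants: validate the configuration,
 * block migration when virgl is enabled, initialize the virtio device,
 * create the control and cursor queues and one console per output.
 */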
bool
virtio_gpu_base_device_realize(DeviceState *qdev,
                               VirtIOHandleOutput ctrl_cb,
                               VirtIOHandleOutput cursor_cb,
                               Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return false;
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        if (migrate_add_blocker(&g->migration_blocker, errp) < 0) {
            return false;
        }
    }

    g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
    virtio_init(VIRTIO_DEVICE(g), VIRTIO_ID_GPU,
                sizeof(struct virtio_gpu_config));

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        virtio_add_queue(vdev, 256, ctrl_cb);
        virtio_add_queue(vdev, 16, cursor_cb);
    } else {
        virtio_add_queue(vdev, 64, ctrl_cb);
        virtio_add_queue(vdev, 16, cursor_cb);
    }

    g->enabled_output_bitmask = 1;

    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    g->hw_ops = &virtio_gpu_ops;
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
    }

    return true;
}

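/* Advertise the virtio feature bits that match the device configuration. */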
static uint64_t
virtio_gpu_base_get_features(VirtIODevice *vdev, uint64_t features,
                             Error **errp)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    if (virtio_gpu_virgl_enabled(g->conf) ||
        virtio_gpu_rutabaga_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    if (virtio_gpu_edid_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_EDID);
    }
    if (virtio_gpu_blob_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_RESOURCE_BLOB);
    }
    if (virtio_gpu_context_init_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_CONTEXT_INIT);
    }

    return features;
}

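/* Only trace whether the guest negotiated the VIRGL feature. */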
static void
virtio_gpu_base_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);

    trace_virtio_gpu_features(((features & virgl) == virgl));
}

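/* Tear down the queues and drop the migration blocker, if one was added. */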
void
virtio_gpu_base_device_unrealize(DeviceState *qdev)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);

    virtio_del_queue(vdev, 0);
    virtio_del_queue(vdev, 1);
    virtio_cleanup(vdev);
    migrate_del_blocker(&g->migration_blocker);
}

static void
virtio_gpu_base_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->unrealize = virtio_gpu_base_device_unrealize;
    vdc->get_features = virtio_gpu_base_get_features;
    vdc->set_features = virtio_gpu_base_set_features;

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    dc->hotpluggable = false;
}

static const TypeInfo virtio_gpu_base_info = {
    .name = TYPE_VIRTIO_GPU_BASE,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPUBase),
    .class_size = sizeof(VirtIOGPUBaseClass),
    .class_init = virtio_gpu_base_class_init,
    .abstract = true
};
module_obj(TYPE_VIRTIO_GPU_BASE);
module_kconfig(VIRTIO_GPU);

static void
virtio_register_types(void)
{
    type_register_static(&virtio_gpu_base_info);
}

type_init(virtio_register_types)

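/*
 * Compile-time checks that the command and response structures keep the
 * sizes defined by the virtio-gpu specification.
 */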
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);