/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "hw/virtio/virtio-gpu.h"
#include "migration/blocker.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/display/edid.h"
#include "trace.h"
#include "qapi/qapi-types-virtio.h"
24 void
virtio_gpu_base_reset(VirtIOGPUBase * g)25 virtio_gpu_base_reset(VirtIOGPUBase *g)
26 {
27 int i;
28
29 g->enable = 0;
30
31 for (i = 0; i < g->conf.max_outputs; i++) {
32 g->scanout[i].resource_id = 0;
33 g->scanout[i].width = 0;
34 g->scanout[i].height = 0;
35 g->scanout[i].x = 0;
36 g->scanout[i].y = 0;
37 g->scanout[i].ds = NULL;
38 }
39 }
40
41 void
virtio_gpu_base_fill_display_info(VirtIOGPUBase * g,struct virtio_gpu_resp_display_info * dpy_info)42 virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
43 struct virtio_gpu_resp_display_info *dpy_info)
44 {
45 int i;
46
47 for (i = 0; i < g->conf.max_outputs; i++) {
48 if (g->enabled_output_bitmask & (1 << i)) {
49 dpy_info->pmodes[i].enabled = 1;
50 dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
51 dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
52 }
53 }
54 }
55
56 void
virtio_gpu_base_generate_edid(VirtIOGPUBase * g,int scanout,struct virtio_gpu_resp_edid * edid)57 virtio_gpu_base_generate_edid(VirtIOGPUBase *g, int scanout,
58 struct virtio_gpu_resp_edid *edid)
59 {
60 size_t output_idx;
61 VirtIOGPUOutputList *node;
62 qemu_edid_info info = {
63 .width_mm = g->req_state[scanout].width_mm,
64 .height_mm = g->req_state[scanout].height_mm,
65 .prefx = g->req_state[scanout].width,
66 .prefy = g->req_state[scanout].height,
67 .refresh_rate = g->req_state[scanout].refresh_rate,
68 };
69
70 for (output_idx = 0, node = g->conf.outputs;
71 output_idx <= scanout && node; output_idx++, node = node->next) {
72 if (output_idx == scanout && node->value && node->value->name) {
73 info.name = node->value->name;
74 break;
75 }
76 }
77
78 edid->size = cpu_to_le32(sizeof(edid->edid));
79 qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
80 }
81
/* GraphicHwOps .invalidate callback: intentionally a no-op here. */
static void virtio_gpu_invalidate_display(void *opaque)
{
}
85
/* GraphicHwOps .gfx_update callback: intentionally a no-op here. */
static void virtio_gpu_update_display(void *opaque)
{
}
89
/* GraphicHwOps .text_update callback: no text-console support, no-op. */
static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}
93
/*
 * Set an event bit (VIRTIO_GPU_EVENT_*) in the device config's
 * events_read field and raise a config-change notification so the
 * guest re-reads the config space.
 */
static void virtio_gpu_notify_event(VirtIOGPUBase *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}
99
/*
 * GraphicHwOps .ui_info callback: record the UI's requested geometry
 * for output idx, update the enabled-output bitmask, and notify the
 * guest that the display configuration changed.
 */
static void virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPUBase *g = opaque;

    /* Ignore updates for outputs beyond the configured limit. */
    if (idx >= g->conf.max_outputs) {
        return;
    }

    /* Remember what the UI asked for; served back via display-info/EDID. */
    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;
    g->req_state[idx].width_mm = info->width_mm;
    g->req_state[idx].height_mm = info->height_mm;
    g->req_state[idx].refresh_rate = info->refresh_rate;

    /* A zero-sized request disables the output. */
    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
}
125
126 static void
virtio_gpu_gl_flushed(void * opaque)127 virtio_gpu_gl_flushed(void *opaque)
128 {
129 VirtIOGPUBase *g = opaque;
130 VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_GET_CLASS(g);
131
132 if (vgc->gl_flushed) {
133 vgc->gl_flushed(g);
134 }
135 }
136
137 static void
virtio_gpu_gl_block(void * opaque,bool block)138 virtio_gpu_gl_block(void *opaque, bool block)
139 {
140 VirtIOGPUBase *g = opaque;
141
142 if (block) {
143 g->renderer_blocked++;
144 } else {
145 g->renderer_blocked--;
146 }
147 assert(g->renderer_blocked >= 0);
148
149 if (!block && g->renderer_blocked == 0) {
150 virtio_gpu_gl_flushed(g);
151 }
152 }
153
154 static int
virtio_gpu_get_flags(void * opaque)155 virtio_gpu_get_flags(void *opaque)
156 {
157 VirtIOGPUBase *g = opaque;
158 int flags = GRAPHIC_FLAGS_NONE;
159
160 if (virtio_gpu_virgl_enabled(g->conf)) {
161 flags |= GRAPHIC_FLAGS_GL;
162 }
163
164 if (virtio_gpu_dmabuf_enabled(g->conf)) {
165 flags |= GRAPHIC_FLAGS_DMABUF;
166 }
167
168 return flags;
169 }
170
/* Console (GraphicHwOps) callbacks shared by all virtio-gpu variants. */
static const GraphicHwOps virtio_gpu_ops = {
    .get_flags = virtio_gpu_get_flags,
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};
179
/*
 * Common realize for all virtio-gpu device variants.
 *
 * Validates the configuration, registers a migration blocker when
 * virgl is enabled, initializes the virtio transport with a control
 * and a cursor queue (handled by ctrl_cb/cursor_cb), and creates one
 * QEMU console per configured output.
 *
 * Returns true on success; on failure sets *errp and returns false.
 */
bool
virtio_gpu_base_device_realize(DeviceState *qdev,
                               VirtIOHandleOutput ctrl_cb,
                               VirtIOHandleOutput cursor_cb,
                               Error **errp)
{
    size_t output_idx;
    VirtIOGPUOutputList *node;
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return false;
    }

    /*
     * Reject more named outputs than max_outputs, and output names
     * longer than what fits in an EDID descriptor.
     */
    for (output_idx = 0, node = g->conf.outputs;
         node; output_idx++, node = node->next) {
        if (output_idx == g->conf.max_outputs) {
            error_setg(errp, "invalid outputs > %d", g->conf.max_outputs);
            return false;
        }
        if (node->value && node->value->name &&
            strlen(node->value->name) > EDID_NAME_MAX_LENGTH) {
            error_setg(errp, "invalid output name '%s' > %d",
                       node->value->name, EDID_NAME_MAX_LENGTH);
            return false;
        }
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* Virgl renderer state cannot be migrated; block migration. */
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        if (migrate_add_blocker(&g->migration_blocker, errp) < 0) {
            return false;
        }
    }

    g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
    virtio_init(VIRTIO_DEVICE(g), VIRTIO_ID_GPU,
                sizeof(struct virtio_gpu_config));

    /* Queue 0: control, queue 1: cursor (unrealize deletes both). */
    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        virtio_add_queue(vdev, 256, ctrl_cb);
        virtio_add_queue(vdev, 16, cursor_cb);
    } else {
        virtio_add_queue(vdev, 64, ctrl_cb);
        virtio_add_queue(vdev, 16, cursor_cb);
    }

    /* Output 0 starts enabled at the configured initial resolution. */
    g->enabled_output_bitmask = 1;

    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    g->hw_ops = &virtio_gpu_ops;
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
    }

    return true;
}
244
245 static uint64_t
virtio_gpu_base_get_features(VirtIODevice * vdev,uint64_t features,Error ** errp)246 virtio_gpu_base_get_features(VirtIODevice *vdev, uint64_t features,
247 Error **errp)
248 {
249 VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
250
251 if (virtio_gpu_virgl_enabled(g->conf) ||
252 virtio_gpu_rutabaga_enabled(g->conf)) {
253 features |= (1 << VIRTIO_GPU_F_VIRGL);
254 }
255 if (virtio_gpu_edid_enabled(g->conf)) {
256 features |= (1 << VIRTIO_GPU_F_EDID);
257 }
258 if (virtio_gpu_blob_enabled(g->conf)) {
259 features |= (1 << VIRTIO_GPU_F_RESOURCE_BLOB);
260 }
261 if (virtio_gpu_context_init_enabled(g->conf)) {
262 features |= (1 << VIRTIO_GPU_F_CONTEXT_INIT);
263 }
264 if (virtio_gpu_resource_uuid_enabled(g->conf)) {
265 features |= (1 << VIRTIO_GPU_F_RESOURCE_UUID);
266 }
267
268 return features;
269 }
270
271 static void
virtio_gpu_base_set_features(VirtIODevice * vdev,uint64_t features)272 virtio_gpu_base_set_features(VirtIODevice *vdev, uint64_t features)
273 {
274 static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
275
276 trace_virtio_gpu_features(((features & virgl) == virgl));
277 }
278
/*
 * Common unrealize: tear down what virtio_gpu_base_device_realize set
 * up — delete the control (0) and cursor (1) queues, clean up the
 * virtio transport, and drop the virgl migration blocker (a no-op if
 * none was registered).
 */
void
virtio_gpu_base_device_unrealize(DeviceState *qdev)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);

    virtio_del_queue(vdev, 0);
    virtio_del_queue(vdev, 1);
    virtio_cleanup(vdev);
    migrate_del_blocker(&g->migration_blocker);
}
290
/*
 * QOM class init for the abstract base type: install the shared virtio
 * callbacks; subclasses provide realize and the request handlers.
 */
static void
virtio_gpu_base_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->unrealize = virtio_gpu_base_device_unrealize;
    vdc->get_features = virtio_gpu_base_get_features;
    vdc->set_features = virtio_gpu_base_set_features;

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    /* Display devices cannot be hot-plugged. */
    dc->hotpluggable = false;
}
304
/* Abstract QOM type; concrete virtio-gpu variants derive from it. */
static const TypeInfo virtio_gpu_base_info = {
    .name = TYPE_VIRTIO_GPU_BASE,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPUBase),
    .class_size = sizeof(VirtIOGPUBaseClass),
    .class_init = virtio_gpu_base_class_init,
    .abstract = true
};
module_obj(TYPE_VIRTIO_GPU_BASE);
module_kconfig(VIRTIO_GPU);
315
/* Register the base type with QOM at startup (via type_init). */
static void
virtio_register_types(void)
{
    type_register_static(&virtio_gpu_base_info);
}

type_init(virtio_register_types)
323
/*
 * Compile-time checks that the wire structures have the exact sizes
 * fixed by the virtio-gpu specification; a mismatch would silently
 * break the guest ABI.
 */
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

/* 3D (virgl) command structures. */
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);
346