/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "hw/virtio/virtio-gpu.h"
#include "migration/blocker.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "trace.h"

/*
 * Reset the base device state: disable the device, drop back to the
 * non-virgl (2D) renderer, and clear every scanout's geometry and
 * display surface reference.
 */
void
virtio_gpu_base_reset(VirtIOGPUBase *g)
{
    int i;

    g->enable = 0;
    g->use_virgl_renderer = false;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
}

/*
 * Fill a VIRTIO_GPU_CMD_GET_DISPLAY_INFO response with the currently
 * enabled outputs and their requested sizes.  Fields are converted to
 * little endian as required by the virtio-gpu wire format; entries for
 * disabled outputs are left untouched (caller is expected to provide a
 * zeroed response structure).
 */
void
virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
                                  struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
            dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
        }
    }
}

/* No-op: guest-driven rendering, nothing to invalidate from the console side. */
static void virtio_gpu_invalidate_display(void *opaque)
{
}

/* No-op: display updates are pushed by the guest via the control queue. */
static void virtio_gpu_update_display(void *opaque)
{
}

/* No-op: virtio-gpu has no text-mode console to render. */
static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

/*
 * Latch an event bit in the device config space and raise a config
 * change interrupt so the guest re-reads events_read.
 */
static void virtio_gpu_notify_event(VirtIOGPUBase *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

/*
 * UI callback: the host display (e.g. a GTK window resize) reports new
 * geometry hints for output @idx.  Record them as the requested state,
 * mark the output enabled iff it has a non-zero size, and notify the
 * guest that the display configuration changed.  Returns -1 for an
 * out-of-range output index, 0 on success.
 */
static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPUBase *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;
    g->req_state[idx].width_mm = info->width_mm;
    g->req_state[idx].height_mm = info->height_mm;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

/*
 * UI callback: the display backend blocks/unblocks GL rendering.
 * Calls may nest, so a counter is kept rather than a flag; the
 * subclass hook gl_unblock is invoked only when the last blocker
 * is released.
 */
static void
virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPUBase *g = opaque;
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_GET_CLASS(g);

    if (block) {
        g->renderer_blocked++;
    } else {
        g->renderer_blocked--;
    }
    assert(g->renderer_blocked >= 0);

    if (g->renderer_blocked == 0) {
        vgc->gl_unblock(g);
    }
}

/* Console operations registered for every scanout of this device. */
static const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};

/*
 * Common realize step for virtio-gpu variants.
 *
 * Validates max_outputs, installs a migration blocker when virgl (3D)
 * is enabled (virgl renderer state cannot be migrated), initializes the
 * virtio transport, creates the control and cursor virtqueues with the
 * given handlers, and allocates one graphic console per output.
 *
 * Returns true on success; on failure sets @errp and returns false.
 */
bool
virtio_gpu_base_device_realize(DeviceState *qdev,
                               VirtIOHandleOutput ctrl_cb,
                               VirtIOHandleOutput cursor_cb,
                               Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return false;
    }

    g->use_virgl_renderer = false;
    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        if (migrate_add_blocker(g->migration_blocker, errp) < 0) {
            /* blocker was not registered; free it here to avoid a leak */
            error_free(g->migration_blocker);
            return false;
        }
    }

    g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                sizeof(struct virtio_gpu_config));

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        virtio_add_queue(vdev, 256, ctrl_cb);
        virtio_add_queue(vdev, 16, cursor_cb);
    } else {
        virtio_add_queue(vdev, 64, ctrl_cb);
        virtio_add_queue(vdev, 16, cursor_cb);
    }

    /* only output 0 starts enabled, sized from the xres/yres properties */
    g->enabled_output_bitmask = 1;

    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    g->hw_ops = &virtio_gpu_ops;
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            /* secondary consoles start without a surface */
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }

    return true;
}

/*
 * Feature negotiation: advertise VIRGL and/or EDID on top of the
 * transport-provided feature bits, depending on device configuration.
 */
static uint64_t
virtio_gpu_base_get_features(VirtIODevice *vdev, uint64_t features,
                             Error **errp)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    if (virtio_gpu_edid_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_EDID);
    }

    return features;
}

/*
 * Record whether the guest accepted the VIRGL feature; this selects
 * the 3D renderer path for the rest of the device's lifetime.
 */
static void
virtio_gpu_base_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

/*
 * Unrealize: drop the migration blocker installed at realize time, if
 * any, and release its Error object.
 */
static void
virtio_gpu_base_device_unrealize(DeviceState *qdev)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);

    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

/* QOM class init: wire up virtio callbacks and device metadata. */
static void
virtio_gpu_base_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->unrealize = virtio_gpu_base_device_unrealize;
    vdc->get_features = virtio_gpu_base_get_features;
    vdc->set_features = virtio_gpu_base_set_features;

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    /* display devices cannot be hot-plugged */
    dc->hotpluggable = false;
}

/* Abstract base type; concrete GPUs (2D/virgl/vhost-user) derive from it. */
static const TypeInfo virtio_gpu_base_info = {
    .name = TYPE_VIRTIO_GPU_BASE,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPUBase),
    .class_size = sizeof(VirtIOGPUBaseClass),
    .class_init = virtio_gpu_base_class_init,
    .abstract = true
};

static void
virtio_register_types(void)
{
    type_register_static(&virtio_gpu_base_info);
}

type_init(virtio_register_types)

/*
 * Compile-time checks that the host struct layouts match the sizes
 * mandated by the virtio-gpu specification (the guest sees these
 * structures on the wire, so any padding drift would break the ABI).
 */
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);