/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 * See the COPYING file in the top-level directory.
 */

#ifndef HW_VIRTIO_GPU_H
#define HW_VIRTIO_GPU_H

#include "qemu/queue.h"
#include "ui/qemu-pixman.h"
#include "ui/console.h"
#include "hw/virtio/virtio.h"
#include "qemu/log.h"
#include "system/vhost-user-backend.h"
#include "qapi/qapi-types-virtio.h"

#include "standard-headers/linux/virtio_gpu.h"
#include "standard-headers/linux/virtio_ids.h"
#include "qom/object.h"

#define TYPE_VIRTIO_GPU_BASE "virtio-gpu-base"
OBJECT_DECLARE_TYPE(VirtIOGPUBase, VirtIOGPUBaseClass,
                    VIRTIO_GPU_BASE)

#define TYPE_VIRTIO_GPU "virtio-gpu-device"
OBJECT_DECLARE_TYPE(VirtIOGPU, VirtIOGPUClass, VIRTIO_GPU)

#define TYPE_VIRTIO_GPU_GL "virtio-gpu-gl-device"
OBJECT_DECLARE_SIMPLE_TYPE(VirtIOGPUGL, VIRTIO_GPU_GL)

#define TYPE_VHOST_USER_GPU "vhost-user-gpu"
OBJECT_DECLARE_SIMPLE_TYPE(VhostUserGPU, VHOST_USER_GPU)

#define TYPE_VIRTIO_GPU_RUTABAGA "virtio-gpu-rutabaga-device"
OBJECT_DECLARE_SIMPLE_TYPE(VirtIOGPURutabaga, VIRTIO_GPU_RUTABAGA)

struct virtio_gpu_simple_resource {
    uint32_t resource_id;
    uint32_t width;
    uint32_t height;
    uint32_t format;
    uint64_t *addrs;
    struct iovec *iov;
    unsigned int iov_cnt;
    uint32_t scanout_bitmask;
    pixman_image_t *image;
    qemu_pixman_shareable share_handle;
    uint64_t hostmem;

    uint64_t blob_size;
    void *blob;
    int dmabuf_fd;
    uint8_t *remapped;

    QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};

struct virtio_gpu_framebuffer {
    pixman_format_code_t format;
    uint32_t bytes_pp;
    uint32_t width, height;
    uint32_t stride;
    uint32_t offset;
};

struct virtio_gpu_scanout {
    QemuConsole *con;
    DisplaySurface *ds;
    uint32_t width, height;
    int x, y;
    int invalidate;
    uint32_t resource_id;
    struct virtio_gpu_update_cursor cursor;
    QEMUCursor *current_cursor;
    struct virtio_gpu_framebuffer fb;
};

struct virtio_gpu_requested_state {
    uint16_t width_mm, height_mm;
    uint32_t width, height;
    uint32_t refresh_rate;
    int x, y;
};

enum virtio_gpu_base_conf_flags {
    VIRTIO_GPU_FLAG_VIRGL_ENABLED = 1,
    VIRTIO_GPU_FLAG_STATS_ENABLED,
    VIRTIO_GPU_FLAG_EDID_ENABLED,
    VIRTIO_GPU_FLAG_DMABUF_ENABLED,
    VIRTIO_GPU_FLAG_BLOB_ENABLED,
    VIRTIO_GPU_FLAG_CONTEXT_INIT_ENABLED,
    VIRTIO_GPU_FLAG_RUTABAGA_ENABLED,
    VIRTIO_GPU_FLAG_VENUS_ENABLED,
    VIRTIO_GPU_FLAG_RESOURCE_UUID_ENABLED,
};

#define virtio_gpu_virgl_enabled(_cfg) \
    (_cfg.flags & (1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED))
#define virtio_gpu_stats_enabled(_cfg) \
    (_cfg.flags & (1 << VIRTIO_GPU_FLAG_STATS_ENABLED))
#define virtio_gpu_edid_enabled(_cfg) \
    (_cfg.flags & (1 << VIRTIO_GPU_FLAG_EDID_ENABLED))
#define virtio_gpu_dmabuf_enabled(_cfg) \
    (_cfg.flags & (1 << VIRTIO_GPU_FLAG_DMABUF_ENABLED))
#define virtio_gpu_blob_enabled(_cfg) \
    (_cfg.flags & (1 << VIRTIO_GPU_FLAG_BLOB_ENABLED))
#define virtio_gpu_context_init_enabled(_cfg) \
    (_cfg.flags & (1 << VIRTIO_GPU_FLAG_CONTEXT_INIT_ENABLED))
#define virtio_gpu_rutabaga_enabled(_cfg) \
    (_cfg.flags & (1 << VIRTIO_GPU_FLAG_RUTABAGA_ENABLED))
#define virtio_gpu_resource_uuid_enabled(_cfg) \
    (_cfg.flags & (1 << VIRTIO_GPU_FLAG_RESOURCE_UUID_ENABLED))
#define virtio_gpu_hostmem_enabled(_cfg) \
    (_cfg.hostmem > 0)
#define virtio_gpu_venus_enabled(_cfg) \
    (_cfg.flags & (1 << VIRTIO_GPU_FLAG_VENUS_ENABLED))
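
/*
 * Usage sketch (illustrative only, not an API declared by this header):
 * the helpers above expand to "_cfg.flags"/"_cfg.hostmem", so callers pass
 * the struct virtio_gpu_base_conf itself rather than a pointer, typically
 * the conf member embedded in the base object, e.g.
 *
 *     VirtIOGPU *g = ...;
 *     if (virtio_gpu_blob_enabled(g->parent_obj.conf) &&
 *         !virtio_gpu_have_udmabuf()) {
 *         // blob resources without host udmabuf support: reject at realize
 *     }
 *
 * virtio_gpu_have_udmabuf() is declared further down in this header.
 */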

struct virtio_gpu_base_conf {
    uint32_t max_outputs;
    uint32_t flags;
    uint32_t xres;
    uint32_t yres;
    uint64_t hostmem;
    VirtIOGPUOutputList *outputs;
};

struct virtio_gpu_ctrl_command {
    VirtQueueElement elem;
    VirtQueue *vq;
    struct virtio_gpu_ctrl_hdr cmd_hdr;
    uint32_t error;
    bool finished;
    QTAILQ_ENTRY(virtio_gpu_ctrl_command) next;
};

struct VirtIOGPUBase {
    VirtIODevice parent_obj;

    Error *migration_blocker;

    struct virtio_gpu_base_conf conf;
    struct virtio_gpu_config virtio_config;
    const GraphicHwOps *hw_ops;

    int renderer_blocked;
    int enable;

    MemoryRegion hostmem;

    struct virtio_gpu_scanout scanout[VIRTIO_GPU_MAX_SCANOUTS];

    int enabled_output_bitmask;
    struct virtio_gpu_requested_state req_state[VIRTIO_GPU_MAX_SCANOUTS];
};

struct VirtIOGPUBaseClass {
    VirtioDeviceClass parent;

    void (*gl_flushed)(VirtIOGPUBase *g);
};

#define VIRTIO_GPU_BASE_PROPERTIES(_state, _conf)                         \
    DEFINE_PROP_UINT32("max_outputs", _state, _conf.max_outputs, 1),      \
    DEFINE_PROP_VIRTIO_GPU_OUTPUT_LIST("outputs", _state, _conf.outputs), \
    DEFINE_PROP_BIT("edid", _state, _conf.flags,                          \
                    VIRTIO_GPU_FLAG_EDID_ENABLED, true),                  \
    DEFINE_PROP_UINT32("xres", _state, _conf.xres, 1280),                 \
    DEFINE_PROP_UINT32("yres", _state, _conf.yres, 800)

typedef struct VGPUDMABuf {
    QemuDmaBuf *buf;
    uint32_t scanout_id;
    QTAILQ_ENTRY(VGPUDMABuf) next;
} VGPUDMABuf;

struct VirtIOGPU {
    VirtIOGPUBase parent_obj;

    uint8_t scanout_vmstate_version;
    uint64_t conf_max_hostmem;

    VirtQueue *ctrl_vq;
    VirtQueue *cursor_vq;

    QEMUBH *ctrl_bh;
    QEMUBH *cursor_bh;
    QEMUBH *reset_bh;
    QemuCond reset_cond;
    bool reset_finished;

    QTAILQ_HEAD(, virtio_gpu_simple_resource) reslist;
    QTAILQ_HEAD(, virtio_gpu_ctrl_command) cmdq;
    QTAILQ_HEAD(, virtio_gpu_ctrl_command) fenceq;

    uint64_t hostmem;

    bool processing_cmdq;

    uint32_t inflight;
    struct {
        uint32_t max_inflight;
        uint32_t requests;
        uint32_t req_3d;
        uint32_t bytes_3d;
    } stats;

    struct {
        QTAILQ_HEAD(, VGPUDMABuf) bufs;
        VGPUDMABuf *primary[VIRTIO_GPU_MAX_SCANOUTS];
    } dmabuf;

    GArray *capset_ids;
};

struct VirtIOGPUClass {
    VirtIOGPUBaseClass parent;

    void (*handle_ctrl)(VirtIODevice *vdev, VirtQueue *vq);
    void (*process_cmd)(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd);
    void (*update_cursor_data)(VirtIOGPU *g,
                               struct virtio_gpu_scanout *s,
                               uint32_t resource_id);
    void (*resource_destroy)(VirtIOGPU *g,
                             struct virtio_gpu_simple_resource *res,
                             Error **errp);
};
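
/*
 * Illustrative sketch (assumed; the "my_*" name is hypothetical): a variant
 * device overrides the hooks above from its QOM class_init, using the cast
 * macro generated by OBJECT_DECLARE_TYPE() at the top of this header, e.g.
 *
 *     VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
 *
 *     vgc->process_cmd = virtio_gpu_virgl_process_cmd;
 *     vgc->update_cursor_data = my_update_cursor_data;
 *
 * virtio_gpu_virgl_process_cmd() is declared at the end of this header.
 */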

/* VirtIOGPUGL renderer states */
typedef enum {
    RS_START,       /* starting state */
    RS_INIT_FAILED, /* failed initialisation */
    RS_INITED,      /* initialised and working */
    RS_RESET,       /* inited and reset pending, moves to start after reset */
} RenderState;

struct VirtIOGPUGL {
    struct VirtIOGPU parent_obj;

    RenderState renderer_state;

    QEMUTimer *fence_poll;
    QEMUTimer *print_stats;

    QEMUBH *cmdq_resume_bh;
};

struct VhostUserGPU {
    VirtIOGPUBase parent_obj;

    VhostUserBackend *vhost;
    int vhost_gpu_fd; /* closed by the chardev */
    CharBackend vhost_chr;
    QemuDmaBuf *dmabuf[VIRTIO_GPU_MAX_SCANOUTS];
    bool backend_blocked;
};

#define MAX_SLOTS 4096

struct MemoryRegionInfo {
    int used;
    MemoryRegion mr;
    uint32_t resource_id;
};

struct rutabaga;

struct VirtIOGPURutabaga {
    VirtIOGPU parent_obj;
    struct MemoryRegionInfo memory_regions[MAX_SLOTS];
    uint64_t capset_mask;
    char *wayland_socket_path;
    char *wsi;
    bool headless;
    uint32_t num_capsets;
    struct rutabaga *rutabaga;
};

#define VIRTIO_GPU_FILL_CMD(out) do {                                   \
        size_t virtiogpufillcmd_s_ =                                    \
            iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, 0,          \
                       &out, sizeof(out));                              \
        if (virtiogpufillcmd_s_ != sizeof(out)) {                       \
            qemu_log_mask(LOG_GUEST_ERROR,                              \
                          "%s: command size incorrect %zu vs %zu\n",    \
                          __func__, virtiogpufillcmd_s_, sizeof(out));  \
            return;                                                     \
        }                                                               \
    } while (0)

/* virtio-gpu-base.c */
bool virtio_gpu_base_device_realize(DeviceState *qdev,
                                    VirtIOHandleOutput ctrl_cb,
                                    VirtIOHandleOutput cursor_cb,
                                    Error **errp);
void virtio_gpu_base_device_unrealize(DeviceState *qdev);
void virtio_gpu_base_reset(VirtIOGPUBase *g);
void virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
                                       struct virtio_gpu_resp_display_info *dpy_info);

void virtio_gpu_base_generate_edid(VirtIOGPUBase *g, int scanout,
                                   struct virtio_gpu_resp_edid *edid);

/* virtio-gpu.c */
struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len);
void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type);
void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd);
void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd);
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov);
void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count);
void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                struct virtio_gpu_simple_resource *res);
void virtio_gpu_process_cmdq(VirtIOGPU *g);
void virtio_gpu_device_realize(DeviceState *qdev, Error **errp);
void virtio_gpu_reset(VirtIODevice *vdev);
void virtio_gpu_simple_process_cmd(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd);
void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id);
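
/*
 * Sketch (illustrative; "my_resource_flush" is a hypothetical name) of how
 * the pieces above combine in a control-queue command handler:
 *
 *     static void my_resource_flush(VirtIOGPU *g,
 *                                   struct virtio_gpu_ctrl_command *cmd)
 *     {
 *         struct virtio_gpu_resource_flush rf;
 *         struct virtio_gpu_simple_resource *res;
 *
 *         VIRTIO_GPU_FILL_CMD(rf);  // copies the request, returns on short read
 *         res = virtio_gpu_find_resource(g, le32_to_cpu(rf.resource_id));
 *         if (!res) {
 *             cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
 *             return;
 *         }
 *         // ... flush the resource ...
 *     }
 *
 * A handler that neither finishes the command itself nor sets cmd->error is
 * typically completed by its caller via virtio_gpu_ctrl_response_nodata()
 * with VIRTIO_GPU_RESP_OK_NODATA.
 */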

/**
 * virtio_gpu_scanout_blob_to_fb() - fill out fb based on scanout data
 * @fb: the frame-buffer descriptor to fill out
 * @ss: the scanout blob data
 * @blob_size: size of the scanout blob data
 *
 * This checks that there is enough space for the frame, taking the
 * stride into account.
 *
 * Returns true on success; otherwise logs a guest error and returns false.
 */
bool virtio_gpu_scanout_blob_to_fb(struct virtio_gpu_framebuffer *fb,
                                   struct virtio_gpu_set_scanout_blob *ss,
                                   uint64_t blob_size);

/* virtio-gpu-udmabuf.c */
bool virtio_gpu_have_udmabuf(void);
void virtio_gpu_init_udmabuf(struct virtio_gpu_simple_resource *res);
void virtio_gpu_fini_udmabuf(struct virtio_gpu_simple_resource *res);
int virtio_gpu_update_dmabuf(VirtIOGPU *g,
                             uint32_t scanout_id,
                             struct virtio_gpu_simple_resource *res,
                             struct virtio_gpu_framebuffer *fb,
                             struct virtio_gpu_rect *r);

void virtio_gpu_update_scanout(VirtIOGPU *g,
                               uint32_t scanout_id,
                               struct virtio_gpu_simple_resource *res,
                               struct virtio_gpu_framebuffer *fb,
                               struct virtio_gpu_rect *r);
void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id);

/* virtio-gpu-3d.c */
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd);
void virtio_gpu_virgl_fence_poll(VirtIOGPU *g);
void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g);
void virtio_gpu_virgl_reset(VirtIOGPU *g);
int virtio_gpu_virgl_init(VirtIOGPU *g);
GArray *virtio_gpu_virgl_get_capsets(VirtIOGPU *g);

#endif