/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 * See the COPYING file in the top-level directory.
 */

#ifndef HW_VIRTIO_GPU_H
#define HW_VIRTIO_GPU_H

#include "qemu/queue.h"
#include "ui/qemu-pixman.h"
#include "ui/console.h"
#include "hw/virtio/virtio.h"
#include "qemu/log.h"
#include "sysemu/vhost-user-backend.h"

#include "standard-headers/linux/virtio_gpu.h"
#include "qom/object.h"

#define TYPE_VIRTIO_GPU_BASE "virtio-gpu-base"
OBJECT_DECLARE_TYPE(VirtIOGPUBase, VirtIOGPUBaseClass,
                    VIRTIO_GPU_BASE)

#define TYPE_VIRTIO_GPU "virtio-gpu-device"
OBJECT_DECLARE_TYPE(VirtIOGPU, VirtIOGPUClass, VIRTIO_GPU)

#define TYPE_VIRTIO_GPU_GL "virtio-gpu-gl-device"
OBJECT_DECLARE_SIMPLE_TYPE(VirtIOGPUGL, VIRTIO_GPU_GL)

#define TYPE_VHOST_USER_GPU "vhost-user-gpu"
OBJECT_DECLARE_SIMPLE_TYPE(VhostUserGPU, VHOST_USER_GPU)

#define VIRTIO_ID_GPU 16

struct virtio_gpu_simple_resource {
    uint32_t resource_id;
    uint32_t width;
    uint32_t height;
    uint32_t format;
    uint64_t *addrs;
    struct iovec *iov;
    unsigned int iov_cnt;
    uint32_t scanout_bitmask;
    pixman_image_t *image;
    uint64_t hostmem;

    uint64_t blob_size;
    void *blob;
    int dmabuf_fd;
    uint8_t *remapped;

    QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};

struct virtio_gpu_framebuffer {
    pixman_format_code_t format;
    uint32_t bytes_pp;
    uint32_t width, height;
    uint32_t stride;
    uint32_t offset;
};

struct virtio_gpu_scanout {
    QemuConsole *con;
    DisplaySurface *ds;
    uint32_t width, height;
    int x, y;
    int invalidate;
    uint32_t resource_id;
    struct virtio_gpu_update_cursor cursor;
    QEMUCursor *current_cursor;
};

struct virtio_gpu_requested_state {
    uint16_t width_mm, height_mm;
    uint32_t width, height;
    int x, y;
};

enum virtio_gpu_base_conf_flags {
    VIRTIO_GPU_FLAG_VIRGL_ENABLED = 1,
    VIRTIO_GPU_FLAG_STATS_ENABLED,
    VIRTIO_GPU_FLAG_EDID_ENABLED,
    VIRTIO_GPU_FLAG_DMABUF_ENABLED,
    VIRTIO_GPU_FLAG_BLOB_ENABLED,
};

#define virtio_gpu_virgl_enabled(_cfg) \
    (_cfg.flags & (1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED))
#define virtio_gpu_stats_enabled(_cfg) \
    (_cfg.flags & (1 << VIRTIO_GPU_FLAG_STATS_ENABLED))
#define virtio_gpu_edid_enabled(_cfg) \
    (_cfg.flags & (1 << VIRTIO_GPU_FLAG_EDID_ENABLED))
#define virtio_gpu_dmabuf_enabled(_cfg) \
    (_cfg.flags & (1 << VIRTIO_GPU_FLAG_DMABUF_ENABLED))
#define virtio_gpu_blob_enabled(_cfg) \
    (_cfg.flags & (1 << VIRTIO_GPU_FLAG_BLOB_ENABLED))

struct virtio_gpu_base_conf {
    uint32_t max_outputs;
    uint32_t flags;
    uint32_t xres;
    uint32_t yres;
};

struct virtio_gpu_ctrl_command {
    VirtQueueElement elem;
    VirtQueue *vq;
    struct virtio_gpu_ctrl_hdr cmd_hdr;
    uint32_t error;
    bool finished;
    QTAILQ_ENTRY(virtio_gpu_ctrl_command) next;
};

struct VirtIOGPUBase {
    VirtIODevice parent_obj;

    Error *migration_blocker;

    struct virtio_gpu_base_conf conf;
    struct virtio_gpu_config virtio_config;
    const GraphicHwOps *hw_ops;

    int renderer_blocked;
    int enable;

    struct virtio_gpu_scanout scanout[VIRTIO_GPU_MAX_SCANOUTS];

    int enabled_output_bitmask;
    struct virtio_gpu_requested_state req_state[VIRTIO_GPU_MAX_SCANOUTS];
};
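
/*
 * Illustrative sketch (not a declaration of this header): the per-feature
 * bits in virtio_gpu_base_conf.flags are tested with the helper macros
 * above.  The macros expand to "_cfg.flags & (1 << ...)", so they take the
 * conf struct itself rather than a pointer, e.g. from a VirtIOGPUBase:
 *
 *     VirtIOGPUBase *b = VIRTIO_GPU_BASE(dev);
 *     if (virtio_gpu_virgl_enabled(b->conf)) {
 *         // virgl/3D path; "dev" here is an assumed DeviceState pointer
 *     }
 */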

struct VirtIOGPUBaseClass {
    VirtioDeviceClass parent;

    void (*gl_flushed)(VirtIOGPUBase *g);
};

#define VIRTIO_GPU_BASE_PROPERTIES(_state, _conf)                    \
    DEFINE_PROP_UINT32("max_outputs", _state, _conf.max_outputs, 1), \
    DEFINE_PROP_BIT("edid", _state, _conf.flags,                     \
                    VIRTIO_GPU_FLAG_EDID_ENABLED, true),             \
    DEFINE_PROP_UINT32("xres", _state, _conf.xres, 1024),            \
    DEFINE_PROP_UINT32("yres", _state, _conf.yres, 768)

struct VirtIOGPU {
    VirtIOGPUBase parent_obj;

    uint64_t conf_max_hostmem;

    VirtQueue *ctrl_vq;
    VirtQueue *cursor_vq;

    QEMUBH *ctrl_bh;
    QEMUBH *cursor_bh;

    QTAILQ_HEAD(, virtio_gpu_simple_resource) reslist;
    QTAILQ_HEAD(, virtio_gpu_ctrl_command) cmdq;
    QTAILQ_HEAD(, virtio_gpu_ctrl_command) fenceq;

    uint64_t hostmem;

    bool processing_cmdq;
    QEMUTimer *fence_poll;
    QEMUTimer *print_stats;

    uint32_t inflight;
    struct {
        uint32_t max_inflight;
        uint32_t requests;
        uint32_t req_3d;
        uint32_t bytes_3d;
    } stats;
};

struct VirtIOGPUClass {
    VirtIOGPUBaseClass parent;

    void (*handle_ctrl)(VirtIODevice *vdev, VirtQueue *vq);
    void (*process_cmd)(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd);
    void (*update_cursor_data)(VirtIOGPU *g,
                               struct virtio_gpu_scanout *s,
                               uint32_t resource_id);
};

struct VirtIOGPUGL {
    struct VirtIOGPU parent_obj;

    bool renderer_inited;
    bool renderer_reset;
};

struct VhostUserGPU {
    VirtIOGPUBase parent_obj;

    VhostUserBackend *vhost;
    int vhost_gpu_fd; /* closed by the chardev */
    CharBackend vhost_chr;
    QemuDmaBuf dmabuf[VIRTIO_GPU_MAX_SCANOUTS];
    bool backend_blocked;
};

#define VIRTIO_GPU_FILL_CMD(out) do {                                  \
        size_t s;                                                      \
        s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, 0,         \
                       &out, sizeof(out));                             \
        if (s != sizeof(out)) {                                        \
            qemu_log_mask(LOG_GUEST_ERROR,                             \
                          "%s: command size incorrect %zu vs %zu\n",   \
                          __func__, s, sizeof(out));                   \
            return;                                                    \
        }                                                              \
    } while (0)
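
/*
 * Illustrative sketch (handler name is hypothetical): VIRTIO_GPU_FILL_CMD()
 * assumes a "cmd" variable in scope and returns from the enclosing function
 * on a short read, so a command handler typically looks like:
 *
 *     static void virtio_gpu_do_foo(VirtIOGPU *g,
 *                                   struct virtio_gpu_ctrl_command *cmd)
 *     {
 *         struct virtio_gpu_resource_create_2d c2d;
 *
 *         VIRTIO_GPU_FILL_CMD(c2d);   // copy the request out of the guest sg list
 *         if (c2d.resource_id == 0) {
 *             cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
 *             return;                 // caller sends the error response
 *         }
 *         // ... create the resource ...
 *     }
 */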

/* virtio-gpu-base.c */
bool virtio_gpu_base_device_realize(DeviceState *qdev,
                                    VirtIOHandleOutput ctrl_cb,
                                    VirtIOHandleOutput cursor_cb,
                                    Error **errp);
void virtio_gpu_base_reset(VirtIOGPUBase *g);
void virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
                                       struct virtio_gpu_resp_display_info *dpy_info);

/* virtio-gpu.c */
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len);
void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type);
void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd);
void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd);
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov);
void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count);
void virtio_gpu_process_cmdq(VirtIOGPU *g);
void virtio_gpu_device_realize(DeviceState *qdev, Error **errp);
void virtio_gpu_reset(VirtIODevice *vdev);
void virtio_gpu_simple_process_cmd(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd);
void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id);

/* virtio-gpu-udmabuf.c */
bool virtio_gpu_have_udmabuf(void);
void virtio_gpu_init_udmabuf(struct virtio_gpu_simple_resource *res);
void virtio_gpu_fini_udmabuf(struct virtio_gpu_simple_resource *res);

/* virtio-gpu-3d.c */
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd);
void virtio_gpu_virgl_fence_poll(VirtIOGPU *g);
void virtio_gpu_virgl_reset(VirtIOGPU *g);
int virtio_gpu_virgl_init(VirtIOGPU *g);
int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g);

#endif