/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "trace.h"
#include "exec/ramblock.h"
#include "sysemu/hostmem.h"
#include <sys/ioctl.h>
#include <fcntl.h>
#include <linux/memfd.h>
#include "qemu/memfd.h"
#include "standard-headers/linux/udmabuf.h"

/*
 * Build a single dmabuf fd covering all of the resource's backing
 * iovecs via the Linux udmabuf driver.  Every iovec must live in an
 * fd-backed (memfd) RAMBlock, otherwise res->dmabuf_fd is left as-is.
 */
static void virtio_gpu_create_udmabuf(struct virtio_gpu_simple_resource *res)
{
    struct udmabuf_create_list *list;
    RAMBlock *rb;
    ram_addr_t offset;
    int udmabuf, i;

    udmabuf = udmabuf_fd();
    if (udmabuf < 0) {
        return;
    }

    list = g_malloc0(sizeof(struct udmabuf_create_list) +
                     sizeof(struct udmabuf_create_item) * res->iov_cnt);

    for (i = 0; i < res->iov_cnt; i++) {
        rcu_read_lock();
        rb = qemu_ram_block_from_host(res->iov[i].iov_base, false, &offset);
        rcu_read_unlock();

        if (!rb || rb->fd < 0) {
            g_free(list);
            return;
        }

        list->list[i].memfd  = rb->fd;
        list->list[i].offset = offset;
        list->list[i].size   = res->iov[i].iov_len;
    }

    list->count = res->iov_cnt;
    list->flags = UDMABUF_FLAGS_CLOEXEC;

    res->dmabuf_fd = ioctl(udmabuf, UDMABUF_CREATE_LIST, list);
    if (res->dmabuf_fd < 0) {
        warn_report("%s: UDMABUF_CREATE_LIST: %s", __func__,
                    strerror(errno));
    }
    g_free(list);
}

/* Map the dmabuf to get one linear view of the scattered blob pages. */
static void virtio_gpu_remap_udmabuf(struct virtio_gpu_simple_resource *res)
{
    res->remapped = mmap(NULL, res->blob_size, PROT_READ,
                         MAP_SHARED, res->dmabuf_fd, 0);
    if (res->remapped == MAP_FAILED) {
        warn_report("%s: dmabuf mmap failed: %s", __func__,
                    strerror(errno));
        res->remapped = NULL;
    }
}

static void virtio_gpu_destroy_udmabuf(struct virtio_gpu_simple_resource *res)
{
    if (res->remapped) {
        munmap(res->remapped, res->blob_size);
        res->remapped = NULL;
    }
    if (res->dmabuf_fd >= 0) {
        close(res->dmabuf_fd);
        res->dmabuf_fd = -1;
    }
}

/*
 * object_child_foreach() callback: flags *memfd_backend when a memory
 * backend's RAMBlock is fd-backed and the fd reports memfd seals.
 */
static int find_memory_backend_type(Object *obj, void *opaque)
{
    bool *memfd_backend = opaque;
    int ret;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        HostMemoryBackend *backend = MEMORY_BACKEND(obj);
        RAMBlock *rb = backend->mr.ram_block;

        if (rb && rb->fd > 0) {
            ret = fcntl(rb->fd, F_GET_SEALS);
            if (ret > 0) {
                *memfd_backend = true;
            }
        }
    }

    return 0;
}

/* udmabuf needs both a host udmabuf device and memfd-backed guest RAM. */
bool virtio_gpu_have_udmabuf(void)
{
    Object *memdev_root;
    int udmabuf;
    bool memfd_backend = false;

    udmabuf = udmabuf_fd();
    if (udmabuf < 0) {
        return false;
    }

    memdev_root = object_resolve_path("/objects", NULL);
    object_child_foreach(memdev_root, find_memory_backend_type, &memfd_backend);

    return memfd_backend;
}

/*
 * Publish the resource's backing storage as res->blob: use the single
 * iovec directly when the guest memory is already contiguous, else
 * stitch the pieces together with a udmabuf and mmap that.
 */
void virtio_gpu_init_udmabuf(struct virtio_gpu_simple_resource *res)
{
    void *pdata = NULL;

    res->dmabuf_fd = -1;
    if (res->iov_cnt == 1) {
        pdata = res->iov[0].iov_base;
    } else {
        virtio_gpu_create_udmabuf(res);
        if (res->dmabuf_fd < 0) {
            return;
        }
        virtio_gpu_remap_udmabuf(res);
        if (!res->remapped) {
            return;
        }
        pdata = res->remapped;
    }

    res->blob = pdata;
}

void virtio_gpu_fini_udmabuf(struct virtio_gpu_simple_resource *res)
{
    if (res->remapped) {
        virtio_gpu_destroy_udmabuf(res);
    }
}

static void virtio_gpu_free_dmabuf(VirtIOGPU *g, VGPUDMABuf *dmabuf)
{
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[dmabuf->scanout_id];
    dpy_gl_release_dmabuf(scanout->con, &dmabuf->buf);
    QTAILQ_REMOVE(&g->dmabuf.bufs, dmabuf, next);
    g_free(dmabuf);
}

static VGPUDMABuf
*virtio_gpu_create_dmabuf(VirtIOGPU *g,
                          uint32_t scanout_id,
                          struct virtio_gpu_simple_resource *res,
                          struct virtio_gpu_framebuffer *fb,
                          struct virtio_gpu_rect *r)
{
    VGPUDMABuf *dmabuf;

    if (res->dmabuf_fd < 0) {
        return NULL;
    }

    dmabuf = g_new0(VGPUDMABuf, 1);
    dmabuf->buf.width = fb->width;
    dmabuf->buf.height = fb->height;
    dmabuf->buf.stride = fb->stride;
    dmabuf->buf.x = r->x;
    dmabuf->buf.y = r->y;
    dmabuf->buf.scanout_width = r->width;
    dmabuf->buf.scanout_height = r->height;
    dmabuf->buf.fourcc = qemu_pixman_to_drm_format(fb->format);
    dmabuf->buf.fd = res->dmabuf_fd;
    dmabuf->buf.allow_fences = true;
    dmabuf->buf.draw_submitted = false;
    dmabuf->scanout_id = scanout_id;
    QTAILQ_INSERT_HEAD(&g->dmabuf.bufs, dmabuf, next);

    return dmabuf;
}

/*
 * Make the dmabuf backing @res the primary buffer of @scanout_id,
 * releasing the previous primary (if any) after the switch.
 */
int virtio_gpu_update_dmabuf(VirtIOGPU *g,
                             uint32_t scanout_id,
                             struct virtio_gpu_simple_resource *res,
                             struct virtio_gpu_framebuffer *fb,
                             struct virtio_gpu_rect *r)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    VGPUDMABuf *new_primary, *old_primary = NULL;

    new_primary = virtio_gpu_create_dmabuf(g, scanout_id, res, fb, r);
    if (!new_primary) {
        return -EINVAL;
    }

    if (g->dmabuf.primary[scanout_id]) {
        old_primary = g->dmabuf.primary[scanout_id];
    }

    g->dmabuf.primary[scanout_id] = new_primary;
    qemu_console_resize(scanout->con,
                        new_primary->buf.scanout_width,
                        new_primary->buf.scanout_height);
    dpy_gl_scanout_dmabuf(scanout->con, &new_primary->buf);

    if (old_primary) {
        virtio_gpu_free_dmabuf(g, old_primary);
    }

    return 0;
}