/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include <drm/drm_file.h>

#include "virtgpu_drv.h"

static void virtio_gpu_config_changed_work_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     config_changed_work);
	u32 events_read, events_clear = 0;

	/* read the config space */
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			events_read, &events_read);
	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
		if (vgdev->has_edid)
			virtio_gpu_cmd_get_edids(vgdev);
		virtio_gpu_cmd_get_display_info(vgdev);
		virtio_gpu_notify(vgdev);
		drm_helper_hpd_irq_event(vgdev->ddev);
		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
	}
	virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
			 events_clear, &events_clear);
}

static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
			       void (*work_func)(struct work_struct *work))
{
	spin_lock_init(&vgvq->qlock);
	init_waitqueue_head(&vgvq->ack_queue);
	INIT_WORK(&vgvq->dequeue_work, work_func);
}

static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
				   int num_capsets)
{
	int i, ret;

	vgdev->capsets = kcalloc(num_capsets,
				 sizeof(struct virtio_gpu_drv_capset),
				 GFP_KERNEL);
	if (!vgdev->capsets) {
		DRM_ERROR("failed to allocate cap sets\n");
		return;
	}
	for (i = 0; i < num_capsets; i++) {
		virtio_gpu_cmd_get_capset_info(vgdev, i);
		virtio_gpu_notify(vgdev);
		ret = wait_event_timeout(vgdev->resp_wq,
					 vgdev->capsets[i].id > 0, 5 * HZ);
		if (ret == 0) {
			DRM_ERROR("timed out waiting for cap set %d\n", i);
			spin_lock(&vgdev->display_info_lock);
			kfree(vgdev->capsets);
			vgdev->capsets = NULL;
			spin_unlock(&vgdev->display_info_lock);
			return;
		}
		DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
			 i, vgdev->capsets[i].id,
			 vgdev->capsets[i].max_version,
			 vgdev->capsets[i].max_size);
	}
	vgdev->num_capsets = num_capsets;
}

int virtio_gpu_init(struct drm_device *dev)
{
	static vq_callback_t *callbacks[] = {
		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
	};
	static const char * const names[] = { "control", "cursor" };

	struct virtio_gpu_device *vgdev;
	/* this will expand later */
	struct virtqueue *vqs[2];
	u32 num_scanouts, num_capsets;
	int ret = 0;

	if (!virtio_has_feature(dev_to_virtio(dev->dev), VIRTIO_F_VERSION_1))
		return -ENODEV;

	vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
	if (!vgdev)
		return -ENOMEM;

	vgdev->ddev = dev;
	dev->dev_private = vgdev;
	vgdev->vdev = dev_to_virtio(dev->dev);
	vgdev->dev = dev->dev;

	spin_lock_init(&vgdev->display_info_lock);
	spin_lock_init(&vgdev->resource_export_lock);
	spin_lock_init(&vgdev->host_visible_lock);
	ida_init(&vgdev->ctx_id_ida);
	ida_init(&vgdev->resource_ida);
	init_waitqueue_head(&vgdev->resp_wq);
	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);

	vgdev->fence_drv.context = dma_fence_context_alloc(1);
	spin_lock_init(&vgdev->fence_drv.lock);
	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
	INIT_LIST_HEAD(&vgdev->cap_cache);
	INIT_WORK(&vgdev->config_changed_work,
		  virtio_gpu_config_changed_work_func);

	INIT_WORK(&vgdev->obj_free_work,
		  virtio_gpu_array_put_free_work);
	INIT_LIST_HEAD(&vgdev->obj_free_list);
	spin_lock_init(&vgdev->obj_free_lock);

#ifdef __LITTLE_ENDIAN
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
		vgdev->has_virgl_3d = true;
#endif
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
		vgdev->has_edid = true;
	}
	if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
		vgdev->has_indirect = true;
	}
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
		vgdev->has_resource_assign_uuid = true;
	}
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) {
		vgdev->has_resource_blob = true;
	}
	if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
				  VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
		if (!devm_request_mem_region(&vgdev->vdev->dev,
					     vgdev->host_visible_region.addr,
					     vgdev->host_visible_region.len,
					     dev_name(&vgdev->vdev->dev))) {
			DRM_ERROR("Could not reserve host visible region\n");
			goto err_vqs;
		}

		DRM_INFO("Host memory window: 0x%lx +0x%lx\n",
			 (unsigned long)vgdev->host_visible_region.addr,
			 (unsigned long)vgdev->host_visible_region.len);
		vgdev->has_host_visible = true;
		drm_mm_init(&vgdev->host_visible_mm,
			    (unsigned long)vgdev->host_visible_region.addr,
			    (unsigned long)vgdev->host_visible_region.len);
	}

	DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible\n",
		 vgdev->has_virgl_3d ? '+' : '-',
		 vgdev->has_edid ? '+' : '-',
		 vgdev->has_resource_blob ? '+' : '-',
		 vgdev->has_host_visible ? '+' : '-');
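
	/* Set up the control and cursor virtqueues negotiated with the host. */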
	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
	if (ret) {
		DRM_ERROR("failed to find virt queues\n");
		goto err_vqs;
	}
	vgdev->ctrlq.vq = vqs[0];
	vgdev->cursorq.vq = vqs[1];
	ret = virtio_gpu_alloc_vbufs(vgdev);
	if (ret) {
		DRM_ERROR("failed to alloc vbufs\n");
		goto err_vbufs;
	}

	/* get display info */
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_scanouts, &num_scanouts);
	vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
				    VIRTIO_GPU_MAX_SCANOUTS);
	if (!vgdev->num_scanouts) {
		DRM_ERROR("num_scanouts is zero\n");
		ret = -EINVAL;
		goto err_scanouts;
	}
	DRM_INFO("number of scanouts: %d\n", num_scanouts);

	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_capsets, &num_capsets);
	DRM_INFO("number of cap sets: %d\n", num_capsets);

	ret = virtio_gpu_modeset_init(vgdev);
	if (ret) {
		DRM_ERROR("modeset init failed\n");
		goto err_scanouts;
	}

	virtio_device_ready(vgdev->vdev);

	if (num_capsets)
		virtio_gpu_get_capsets(vgdev, num_capsets);
	if (vgdev->has_edid)
		virtio_gpu_cmd_get_edids(vgdev);
	virtio_gpu_cmd_get_display_info(vgdev);
	virtio_gpu_notify(vgdev);
	wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
			   5 * HZ);
	return 0;

err_scanouts:
	virtio_gpu_free_vbufs(vgdev);
err_vbufs:
	vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
	kfree(vgdev);
	return ret;
}

static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_drv_cap_cache *cache_ent, *tmp;

	list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
	}
}

void virtio_gpu_deinit(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	flush_work(&vgdev->obj_free_work);
	flush_work(&vgdev->ctrlq.dequeue_work);
	flush_work(&vgdev->cursorq.dequeue_work);
	flush_work(&vgdev->config_changed_work);
	vgdev->vdev->config->reset(vgdev->vdev);
	vgdev->vdev->config->del_vqs(vgdev->vdev);
}

void virtio_gpu_release(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	virtio_gpu_modeset_fini(vgdev);
	virtio_gpu_free_vbufs(vgdev);
	virtio_gpu_cleanup_cap_cache(vgdev);

	if (vgdev->has_host_visible)
		drm_mm_takedown(&vgdev->host_visible_mm);

	kfree(vgdev->capsets);
	kfree(vgdev);
}

int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv;
	int handle;

	/* can't create contexts without 3d renderer */
	if (!vgdev->has_virgl_3d)
		return 0;

	/* allocate a virt GPU context for this opener */
	vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL);
	if (!vfpriv)
		return -ENOMEM;

	mutex_init(&vfpriv->context_lock);

	handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
	if (handle < 0) {
		kfree(vfpriv);
		return handle;
	}

	vfpriv->ctx_id = handle + 1;
	file->driver_priv = vfpriv;
	return 0;
}

void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;

	if (!vgdev->has_virgl_3d)
		return;

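	/* Destroy the host-side context created for this file, if any. */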
	if (vfpriv->context_created) {
		virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id);
		virtio_gpu_notify(vgdev);
	}

	ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1);
	mutex_destroy(&vfpriv->context_lock);
	kfree(vfpriv);
	file->driver_priv = NULL;
}