/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include <drm/drm_file.h>

#include "virtgpu_drv.h"

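/*
 * Worker for config-change interrupts.  Reads the events_read field from
 * the virtio config space; on a DISPLAY event it refreshes EDID and
 * display info from the host, raises a hotplug event towards userspace,
 * and finally acknowledges the event by writing events_clear back.
 */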
static void virtio_gpu_config_changed_work_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     config_changed_work);
	u32 events_read, events_clear = 0;

	/* read the config space */
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			events_read, &events_read);
	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
		if (vgdev->has_edid)
			virtio_gpu_cmd_get_edids(vgdev);
		virtio_gpu_cmd_get_display_info(vgdev);
		virtio_gpu_notify(vgdev);
		drm_helper_hpd_irq_event(vgdev->ddev);
		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
	}
	virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
			 events_clear, &events_clear);
}

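/* Set up the lock, ack wait queue and dequeue worker of a virtqueue. */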
static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
			       void (*work_func)(struct work_struct *work))
{
	spin_lock_init(&vgvq->qlock);
	init_waitqueue_head(&vgvq->ack_queue);
	INIT_WORK(&vgvq->dequeue_work, work_func);
}

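/*
 * Fetch id, max-version and max-size for each capability set advertised
 * by the host.  Waits up to five seconds per capset; on timeout the
 * capsets array is freed under display_info_lock to avoid racing with a
 * late response.
 */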
static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
				   int num_capsets)
{
	int i, ret;

	vgdev->capsets = kcalloc(num_capsets,
				 sizeof(struct virtio_gpu_drv_capset),
				 GFP_KERNEL);
	if (!vgdev->capsets) {
		DRM_ERROR("failed to allocate cap sets\n");
		return;
	}
	for (i = 0; i < num_capsets; i++) {
		virtio_gpu_cmd_get_capset_info(vgdev, i);
		virtio_gpu_notify(vgdev);
		ret = wait_event_timeout(vgdev->resp_wq,
					 vgdev->capsets[i].id > 0, 5 * HZ);
		if (ret == 0) {
			DRM_ERROR("timed out waiting for cap set %d\n", i);
			spin_lock(&vgdev->display_info_lock);
			kfree(vgdev->capsets);
			vgdev->capsets = NULL;
			spin_unlock(&vgdev->display_info_lock);
			return;
		}
		DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
			 i, vgdev->capsets[i].id,
			 vgdev->capsets[i].max_version,
			 vgdev->capsets[i].max_size);
	}
	vgdev->num_capsets = num_capsets;
}

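/*
 * Device bring-up: negotiate features, create the control and cursor
 * virtqueues, read the scanout and capset counts from the config space,
 * initialize modesetting and kick off the initial display-info query.
 */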
int virtio_gpu_init(struct drm_device *dev)
{
	static vq_callback_t *callbacks[] = {
		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
	};
	static const char * const names[] = { "control", "cursor" };

	struct virtio_gpu_device *vgdev;
	/* two virtqueues for now: control and cursor; this will expand later */
	struct virtqueue *vqs[2];
	u32 num_scanouts, num_capsets;
	int ret = 0;

	if (!virtio_has_feature(dev_to_virtio(dev->dev), VIRTIO_F_VERSION_1))
		return -ENODEV;

	vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
	if (!vgdev)
		return -ENOMEM;

	vgdev->ddev = dev;
	dev->dev_private = vgdev;
	vgdev->vdev = dev_to_virtio(dev->dev);
	vgdev->dev = dev->dev;

	spin_lock_init(&vgdev->display_info_lock);
	spin_lock_init(&vgdev->resource_export_lock);
	ida_init(&vgdev->ctx_id_ida);
	ida_init(&vgdev->resource_ida);
	init_waitqueue_head(&vgdev->resp_wq);
	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);

	vgdev->fence_drv.context = dma_fence_context_alloc(1);
	spin_lock_init(&vgdev->fence_drv.lock);
	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
	INIT_LIST_HEAD(&vgdev->cap_cache);
	INIT_WORK(&vgdev->config_changed_work,
		  virtio_gpu_config_changed_work_func);

	INIT_WORK(&vgdev->obj_free_work,
		  virtio_gpu_array_put_free_work);
	INIT_LIST_HEAD(&vgdev->obj_free_list);
	spin_lock_init(&vgdev->obj_free_lock);

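	/*
	 * virgl 3D is only advertised on little-endian builds: the virgl
	 * command stream is passed through in native byte order.
	 */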
#ifdef __LITTLE_ENDIAN
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
		vgdev->has_virgl_3d = true;
#endif
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID))
		vgdev->has_edid = true;
	if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC))
		vgdev->has_indirect = true;
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID))
		vgdev->has_resource_assign_uuid = true;

	DRM_INFO("features: %cvirgl %cedid\n",
		 vgdev->has_virgl_3d ? '+' : '-',
		 vgdev->has_edid     ? '+' : '-');

	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
	if (ret) {
		DRM_ERROR("failed to find virt queues\n");
		goto err_vqs;
	}
	vgdev->ctrlq.vq = vqs[0];
	vgdev->cursorq.vq = vqs[1];
	ret = virtio_gpu_alloc_vbufs(vgdev);
	if (ret) {
		DRM_ERROR("failed to alloc vbufs\n");
		goto err_vbufs;
	}

	/* read the number of scanouts from the config space */
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_scanouts, &num_scanouts);
	vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
				    VIRTIO_GPU_MAX_SCANOUTS);
	if (!vgdev->num_scanouts) {
		DRM_ERROR("num_scanouts is zero\n");
		ret = -EINVAL;
		goto err_scanouts;
	}
	DRM_INFO("number of scanouts: %d\n", num_scanouts);

	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_capsets, &num_capsets);
	DRM_INFO("number of cap sets: %d\n", num_capsets);

	ret = virtio_gpu_modeset_init(vgdev);
	if (ret) {
		DRM_ERROR("modeset init failed\n");
		goto err_scanouts;
	}

	virtio_device_ready(vgdev->vdev);

	if (num_capsets)
		virtio_gpu_get_capsets(vgdev, num_capsets);
	if (vgdev->has_edid)
		virtio_gpu_cmd_get_edids(vgdev);
	virtio_gpu_cmd_get_display_info(vgdev);
	virtio_gpu_notify(vgdev);
	wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
			   5 * HZ);
	return 0;

err_scanouts:
	virtio_gpu_free_vbufs(vgdev);
err_vbufs:
	vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
	kfree(vgdev);
	return ret;
}

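/* Free every cached capability-set response on the cap_cache list. */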
static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_drv_cap_cache *cache_ent, *tmp;

	list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
	}
}

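/*
 * Tear down in the reverse order of virtio_gpu_init(): flush all pending
 * work, then reset the device and delete its virtqueues.
 */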
void virtio_gpu_deinit(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	flush_work(&vgdev->obj_free_work);
	flush_work(&vgdev->ctrlq.dequeue_work);
	flush_work(&vgdev->cursorq.dequeue_work);
	flush_work(&vgdev->config_changed_work);
	vgdev->vdev->config->reset(vgdev->vdev);
	vgdev->vdev->config->del_vqs(vgdev->vdev);
}

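/* Final cleanup once the last reference to the drm_device is gone. */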
void virtio_gpu_release(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	virtio_gpu_modeset_fini(vgdev);
	virtio_gpu_free_vbufs(vgdev);
	virtio_gpu_cleanup_cap_cache(vgdev);
	kfree(vgdev->capsets);
	kfree(vgdev);
}

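/*
 * Per-file open: reserve a context id for this opener.  The host-side
 * context itself is created later, on first use (see
 * vfpriv->context_created).
 */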
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv;
	int handle;

	/* can't create contexts without 3d renderer */
	if (!vgdev->has_virgl_3d)
		return 0;

	/* allocate a virt GPU context for this opener */
	vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL);
	if (!vfpriv)
		return -ENOMEM;

	mutex_init(&vfpriv->context_lock);

	handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
	if (handle < 0) {
		kfree(vfpriv);
		return handle;
	}

	vfpriv->ctx_id = handle + 1;
	file->driver_priv = vfpriv;
	return 0;
}

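/*
 * Per-file close: destroy the host context if one was created, then
 * return the context id and free the per-file state.
 */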
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;

	if (!vgdev->has_virgl_3d)
		return;

	if (vfpriv->context_created) {
		virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id);
		virtio_gpu_notify(vgdev);
	}

	ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1);
	mutex_destroy(&vfpriv->context_lock);
	kfree(vfpriv);
	file->driver_priv = NULL;
}