/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include <drm/drm_file.h>

#include "virtgpu_drv.h"

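/*
 * Worker for config-space change notifications: read back the event
 * bits, refresh the display info (and EDID, when the host supports
 * it), signal a DRM hotplug event, then ack by clearing the bits we
 * handled.
 */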
static void virtio_gpu_config_changed_work_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     config_changed_work);
	u32 events_read, events_clear = 0;

	/* read the config space */
	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
		     events_read, &events_read);
	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
		if (vgdev->has_edid)
			virtio_gpu_cmd_get_edids(vgdev);
		virtio_gpu_cmd_get_display_info(vgdev);
		virtio_gpu_notify(vgdev);
		drm_helper_hpd_irq_event(vgdev->ddev);
		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
	}
	virtio_cwrite(vgdev->vdev, struct virtio_gpu_config,
		      events_clear, &events_clear);
}

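/*
 * Tear down a rendering context on the host. The IDA handle is
 * ctx_id - 1 because virtio_gpu_driver_open() shifts handles up by
 * one so that context id 0 is never used.
 */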
static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
				       uint32_t ctx_id)
{
	virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
	virtio_gpu_notify(vgdev);
	ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
}

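/* Common virtqueue setup: protecting lock, ack waitqueue, dequeue worker. */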
static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
			       void (*work_func)(struct work_struct *work))
{
	spin_lock_init(&vgvq->qlock);
	init_waitqueue_head(&vgvq->ack_queue);
	INIT_WORK(&vgvq->dequeue_work, work_func);
}

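/*
 * Query id, max-version and max-size for each capability set the host
 * advertises. Replies are filled in asynchronously by the response
 * handler, so wait (up to five seconds per set) on resp_wq.
 */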
static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
				   int num_capsets)
{
	int i, ret;

	vgdev->capsets = kcalloc(num_capsets,
				 sizeof(struct virtio_gpu_drv_capset),
				 GFP_KERNEL);
	if (!vgdev->capsets) {
		DRM_ERROR("failed to allocate cap sets\n");
		return;
	}
	for (i = 0; i < num_capsets; i++) {
		virtio_gpu_cmd_get_capset_info(vgdev, i);
		virtio_gpu_notify(vgdev);
		ret = wait_event_timeout(vgdev->resp_wq,
					 vgdev->capsets[i].id > 0, 5 * HZ);
		if (ret == 0) {
			DRM_ERROR("timed out waiting for cap set %d\n", i);
			kfree(vgdev->capsets);
			vgdev->capsets = NULL;
			return;
		}
		DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
			 i, vgdev->capsets[i].id,
			 vgdev->capsets[i].max_version,
			 vgdev->capsets[i].max_size);
	}
	vgdev->num_capsets = num_capsets;
}

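/*
 * Device bring-up: negotiate features, create the control and cursor
 * virtqueues, read config space, initialize modesetting, then kick
 * off the initial display-info, EDID and capset queries.
 */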
int virtio_gpu_init(struct drm_device *dev)
{
	static vq_callback_t *callbacks[] = {
		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
	};
	static const char * const names[] = { "control", "cursor" };

	struct virtio_gpu_device *vgdev;
	/* this will expand later */
	struct virtqueue *vqs[2];
	u32 num_scanouts, num_capsets;
	int ret;

	if (!virtio_has_feature(dev_to_virtio(dev->dev), VIRTIO_F_VERSION_1))
		return -ENODEV;

	vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
	if (!vgdev)
		return -ENOMEM;

	vgdev->ddev = dev;
	dev->dev_private = vgdev;
	vgdev->vdev = dev_to_virtio(dev->dev);
	vgdev->dev = dev->dev;

	spin_lock_init(&vgdev->display_info_lock);
	ida_init(&vgdev->ctx_id_ida);
	ida_init(&vgdev->resource_ida);
	init_waitqueue_head(&vgdev->resp_wq);
	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);

	vgdev->fence_drv.context = dma_fence_context_alloc(1);
	spin_lock_init(&vgdev->fence_drv.lock);
	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
	INIT_LIST_HEAD(&vgdev->cap_cache);
	INIT_WORK(&vgdev->config_changed_work,
		  virtio_gpu_config_changed_work_func);

	INIT_WORK(&vgdev->obj_free_work,
		  virtio_gpu_array_put_free_work);
	INIT_LIST_HEAD(&vgdev->obj_free_list);
	spin_lock_init(&vgdev->obj_free_lock);

#ifdef __LITTLE_ENDIAN
	/*
	 * virtio-gpu virgl 3d mode is broken on big endian, so don't
	 * bother enabling it there.
	 */
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
		vgdev->has_virgl_3d = true;
#endif
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID))
		vgdev->has_edid = true;
	if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC))
		vgdev->has_indirect = true;

	DRM_INFO("features: %cvirgl %cedid\n",
		 vgdev->has_virgl_3d ? '+' : '-',
		 vgdev->has_edid     ? '+' : '-');

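	/* Queue order matches names[]: vqs[0] is control, vqs[1] is cursor. */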
	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
	if (ret) {
		DRM_ERROR("failed to find virt queues\n");
		goto err_vqs;
	}
	vgdev->ctrlq.vq = vqs[0];
	vgdev->cursorq.vq = vqs[1];
	ret = virtio_gpu_alloc_vbufs(vgdev);
	if (ret) {
		DRM_ERROR("failed to alloc vbufs\n");
		goto err_vbufs;
	}

	/* get display info */
	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
		     num_scanouts, &num_scanouts);
	vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
				    VIRTIO_GPU_MAX_SCANOUTS);
	if (!vgdev->num_scanouts) {
		DRM_ERROR("num_scanouts is zero\n");
		ret = -EINVAL;
		goto err_scanouts;
	}
	DRM_INFO("number of scanouts: %d\n", num_scanouts);

	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
		     num_capsets, &num_capsets);
	DRM_INFO("number of cap sets: %d\n", num_capsets);

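	/*
	 * Set up modesetting before marking the device ready: once ready,
	 * config-change interrupts can fire and the hotplug path expects
	 * the DRM mode objects to exist.
	 */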
	virtio_gpu_modeset_init(vgdev);

	virtio_device_ready(vgdev->vdev);

	if (num_capsets)
		virtio_gpu_get_capsets(vgdev, num_capsets);
	if (vgdev->has_edid)
		virtio_gpu_cmd_get_edids(vgdev);
	virtio_gpu_cmd_get_display_info(vgdev);
	virtio_gpu_notify(vgdev);
	wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
			   5 * HZ);
	return 0;

err_scanouts:
	virtio_gpu_free_vbufs(vgdev);
err_vbufs:
	vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
	kfree(vgdev);
	return ret;
}

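/* Free every cached capset response together with its list entry. */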
static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_drv_cap_cache *cache_ent, *tmp;

	list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
	}
}

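/*
 * Quiesce the device: flush all pending work so nothing touches the
 * virtqueues anymore, then reset the device and delete the queues.
 */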
void virtio_gpu_deinit(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	flush_work(&vgdev->obj_free_work);
	flush_work(&vgdev->ctrlq.dequeue_work);
	flush_work(&vgdev->cursorq.dequeue_work);
	flush_work(&vgdev->config_changed_work);
	vgdev->vdev->config->reset(vgdev->vdev);
	vgdev->vdev->config->del_vqs(vgdev->vdev);
}

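/* Final teardown, called once the last reference to the DRM device is gone. */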
void virtio_gpu_release(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	virtio_gpu_modeset_fini(vgdev);
	virtio_gpu_free_vbufs(vgdev);
	virtio_gpu_cleanup_cap_cache(vgdev);
	kfree(vgdev->capsets);
	kfree(vgdev);
}

int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv;
	int handle;

	/* can't create contexts without 3d renderer */
	if (!vgdev->has_virgl_3d)
		return 0;

	/* allocate a virt GPU context for this opener */
	vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL);
	if (!vfpriv)
		return -ENOMEM;

	mutex_init(&vfpriv->context_lock);

	handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
	if (handle < 0) {
		kfree(vfpriv);
		return handle;
	}

	/* shift up by one: context id 0 stays reserved */
	vfpriv->ctx_id = handle + 1;
	file->driver_priv = vfpriv;
	return 0;
}

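/* Mirror of virtio_gpu_driver_open(): tear down the per-file context. */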
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv;

	if (!vgdev->has_virgl_3d)
		return;

	vfpriv = file->driver_priv;

	virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
	mutex_destroy(&vfpriv->context_lock);
	kfree(vfpriv);
	file->driver_priv = NULL;
}