/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <drm/drmP.h>
#include "virtgpu_drv.h"

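/*
 * Worker for config_changed_work: re-read the events field from the
 * config space, refresh EDID and display info when a DISPLAY event is
 * pending, notify DRM of the hotplug, and acknowledge the event by
 * writing it back via events_clear.
 */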
static void virtio_gpu_config_changed_work_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     config_changed_work);
	u32 events_read, events_clear = 0;

	/* read the config space */
	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
		     events_read, &events_read);
	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
		if (vgdev->has_edid)
			virtio_gpu_cmd_get_edids(vgdev);
		virtio_gpu_cmd_get_display_info(vgdev);
		drm_helper_hpd_irq_event(vgdev->ddev);
		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
	}
	virtio_cwrite(vgdev->vdev, struct virtio_gpu_config,
		      events_clear, &events_clear);
}

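/*
 * Allocate a context ID and ask the host to create the corresponding
 * rendering context.  The IDA hands out IDs starting at 0; the handle
 * is shifted up by one so that 0 is never used as a context ID, and
 * shifted back down again in virtio_gpu_context_destroy().
 */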
static int virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
				      uint32_t nlen, const char *name)
{
	int handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);

	if (handle < 0)
		return handle;
	handle += 1;
	virtio_gpu_cmd_context_create(vgdev, handle, nlen, name);
	return handle;
}

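/*
 * Destroy the context on the host side and return the (shifted) ID to
 * the IDA.
 */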
static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
				      uint32_t ctx_id)
{
	virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
	ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
}

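/*
 * Common setup shared by the control and cursor queues: the queue
 * spinlock, the ack waitqueue, and the work item that dequeues
 * completed buffers.
 */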
static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
			       void (*work_func)(struct work_struct *work))
{
	spin_lock_init(&vgvq->qlock);
	init_waitqueue_head(&vgvq->ack_queue);
	INIT_WORK(&vgvq->dequeue_work, work_func);
}

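/*
 * Query the capability-set headers from the host.  For each capset a
 * GET_CAPSET_INFO command is sent and we wait up to five seconds for
 * the response to fill in id/max_version/max_size; on timeout the
 * whole capsets array is dropped again.
 */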
static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
				   int num_capsets)
{
	int i, ret;

	vgdev->capsets = kcalloc(num_capsets,
				 sizeof(struct virtio_gpu_drv_capset),
				 GFP_KERNEL);
	if (!vgdev->capsets) {
		DRM_ERROR("failed to allocate cap sets\n");
		return;
	}
	for (i = 0; i < num_capsets; i++) {
		virtio_gpu_cmd_get_capset_info(vgdev, i);
		ret = wait_event_timeout(vgdev->resp_wq,
					 vgdev->capsets[i].id > 0, 5 * HZ);
		if (ret == 0) {
			DRM_ERROR("timed out waiting for cap set %d\n", i);
			kfree(vgdev->capsets);
			vgdev->capsets = NULL;
			return;
		}
		DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
			 i, vgdev->capsets[i].id,
			 vgdev->capsets[i].max_version,
			 vgdev->capsets[i].max_size);
	}
	vgdev->num_capsets = num_capsets;
}

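/*
 * Main device initialization: check features, set up the virtqueues,
 * fences and TTM, read the scanout and capset counts from the config
 * space, bring up modesetting, and kick off the initial display-info
 * and capset queries.
 */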
int virtio_gpu_init(struct drm_device *dev)
{
	static vq_callback_t *callbacks[] = {
		virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
	};
	static const char * const names[] = { "control", "cursor" };

	struct virtio_gpu_device *vgdev;
	/* this will expand later */
	struct virtqueue *vqs[2];
	u32 num_scanouts, num_capsets;
	int ret;

	if (!virtio_has_feature(dev_to_virtio(dev->dev), VIRTIO_F_VERSION_1))
		return -ENODEV;

	vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
	if (!vgdev)
		return -ENOMEM;

	vgdev->ddev = dev;
	dev->dev_private = vgdev;
	vgdev->vdev = dev_to_virtio(dev->dev);
	vgdev->dev = dev->dev;

	spin_lock_init(&vgdev->display_info_lock);
	ida_init(&vgdev->ctx_id_ida);
	ida_init(&vgdev->resource_ida);
	init_waitqueue_head(&vgdev->resp_wq);
	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);

	vgdev->fence_drv.context = dma_fence_context_alloc(1);
	spin_lock_init(&vgdev->fence_drv.lock);
	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
	INIT_LIST_HEAD(&vgdev->cap_cache);
	INIT_WORK(&vgdev->config_changed_work,
		  virtio_gpu_config_changed_work_func);

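	/*
	 * Virgl 3D acceleration is only enabled on little-endian
	 * guests; big-endian builds leave it disabled.
	 */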
#ifdef __LITTLE_ENDIAN
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
		vgdev->has_virgl_3d = true;
	DRM_INFO("virgl 3d acceleration %s\n",
		 vgdev->has_virgl_3d ? "enabled" : "not supported by host");
#else
	DRM_INFO("virgl 3d acceleration not supported by guest\n");
#endif
	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
		vgdev->has_edid = true;
		DRM_INFO("EDID support available.\n");
	}

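	/* set up the control and cursor virtqueues */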
	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
	if (ret) {
		DRM_ERROR("failed to find virt queues\n");
		goto err_vqs;
	}
	vgdev->ctrlq.vq = vqs[0];
	vgdev->cursorq.vq = vqs[1];
	ret = virtio_gpu_alloc_vbufs(vgdev);
	if (ret) {
		DRM_ERROR("failed to alloc vbufs\n");
		goto err_vbufs;
	}

	ret = virtio_gpu_ttm_init(vgdev);
	if (ret) {
		DRM_ERROR("failed to init ttm %d\n", ret);
		goto err_ttm;
	}

	/* get display info */
	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
		     num_scanouts, &num_scanouts);
	vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
				    VIRTIO_GPU_MAX_SCANOUTS);
	if (!vgdev->num_scanouts) {
		DRM_ERROR("num_scanouts is zero\n");
		ret = -EINVAL;
		goto err_scanouts;
	}
	DRM_INFO("number of scanouts: %d\n", num_scanouts);

	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
		     num_capsets, &num_capsets);
	DRM_INFO("number of cap sets: %d\n", num_capsets);

	virtio_gpu_modeset_init(vgdev);

	virtio_device_ready(vgdev->vdev);
	vgdev->vqs_ready = true;

	if (num_capsets)
		virtio_gpu_get_capsets(vgdev, num_capsets);
	if (vgdev->has_edid)
		virtio_gpu_cmd_get_edids(vgdev);
	virtio_gpu_cmd_get_display_info(vgdev);
	wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
			   5 * HZ);
	return 0;

err_scanouts:
	virtio_gpu_ttm_fini(vgdev);
err_ttm:
	virtio_gpu_free_vbufs(vgdev);
err_vbufs:
	vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
	kfree(vgdev);
	return ret;
}

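/* Free every cached capability-set response. */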
static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_drv_cap_cache *cache_ent, *tmp;

	list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
	}
}

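/*
 * Device teardown: stop queue processing and flush pending work before
 * resetting the device and deleting the virtqueues, then release the
 * modesetting, TTM, vbuf, and capset state.
 */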
void virtio_gpu_deinit(struct drm_device *dev)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;

	vgdev->vqs_ready = false;
	flush_work(&vgdev->ctrlq.dequeue_work);
	flush_work(&vgdev->cursorq.dequeue_work);
	flush_work(&vgdev->config_changed_work);
	vgdev->vdev->config->reset(vgdev->vdev);
	vgdev->vdev->config->del_vqs(vgdev->vdev);

	virtio_gpu_modeset_fini(vgdev);
	virtio_gpu_ttm_fini(vgdev);
	virtio_gpu_free_vbufs(vgdev);
	virtio_gpu_cleanup_cap_cache(vgdev);
	kfree(vgdev->capsets);
	kfree(vgdev);
}

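/*
 * Per-open setup: if the host offers virgl 3D, create a private
 * rendering context for this file, named after the opening task to
 * ease debugging; without 3D support no context is needed.
 */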
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv;
	int id;
	char dbgname[TASK_COMM_LEN];

	/* can't create contexts without 3d renderer */
	if (!vgdev->has_virgl_3d)
		return 0;

	/* allocate a virt GPU context for this opener */
	vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL);
	if (!vfpriv)
		return -ENOMEM;

	get_task_comm(dbgname, current);
	id = virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname);
	if (id < 0) {
		kfree(vfpriv);
		return id;
	}

	vfpriv->ctx_id = id;
	file->driver_priv = vfpriv;
	return 0;
}

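/*
 * Per-open teardown: destroy the rendering context created in
 * virtio_gpu_driver_open(), if any.
 */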
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv;

	if (!vgdev->has_virgl_3d)
		return;

	vfpriv = file->driver_priv;

	virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
	kfree(vfpriv);
	file->driver_priv = NULL;
}