/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

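/*
 * Virtqueue callbacks run in interrupt context, so they only schedule
 * the dequeue work; the actual buffer reclaim happens in process
 * context from the workqueue.
 */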
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

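/*
 * Allocate a vbuffer from the slab cache.  The command buffer and, for
 * small responses, the response buffer live inline right behind the
 * struct; larger responses use the caller-supplied resp_buf.
 */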
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

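/*
 * Return a vbuffer to the slab cache, freeing any separately allocated
 * response and data buffers first.
 */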
static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

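/*
 * Move all completed buffers from the virtqueue onto reclaim_list.
 * Called with the queue lock held.
 */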
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed\n");
}

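/*
 * Work handler for the control queue: reclaim completed buffers, log
 * error responses, track the highest signalled fence id, run per-buffer
 * response callbacks, then wake up waiters and process fences.
 */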
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
				struct virtio_gpu_ctrl_hdr *cmd;

				cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
				DRM_ERROR("response 0x%x (command 0x%x)\n",
					  le32_to_cpu(resp->type),
					  le32_to_cpu(cmd->type));
			} else {
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
			}
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);

		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

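/*
 * Queue one control buffer: build the scatterlist (command, optional
 * data-out, optional response-in), add it to the virtqueue and kick.
 * Drops and re-acquires the queue lock while waiting for ring space.
 */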
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
		__releases(&vgdev->ctrlq.qlock)
		__acquires(&vgdev->ctrlq.qlock)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	int outcnt = 0, incnt = 0;
	int ret;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vbuf->data_size) {
		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
		sgs[outcnt + incnt] = &vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	if (!ret)
		ret = vq->num_free;
	return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	int rc;

	spin_lock(&vgdev->ctrlq.qlock);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

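/*
 * Fenced variant: emitting the fence and queueing the buffer must
 * happen under the same lock grab so fence ids reach the host in
 * order; see the comment below about reserving ring space first.
 */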
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_ctrl_hdr *hdr,
					       struct virtio_gpu_fence *fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int rc;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue.  If not,
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 3) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (fence)
		virtio_gpu_fence_emit(vgdev, hdr, fence);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

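/* Queue a cursor command; the cursor queue carries no responses. */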
static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (!ret)
		ret = vq->num_free;
	return ret;
}

/* Just create gem objects for userspace and long lived objects,
 * and use dma_alloc'ed pages for the queue objects?
 */

/* create a basic 2D resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
						  uint32_t resource_id,
						  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

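/*
 * Transfer guest memory into a host 2D resource.  When the DMA API is
 * in use the backing pages are synced for the device first.
 */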
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = width;
	cmd_p->r.height = height;
	cmd_p->r.x = x;
	cmd_p->r.y = y;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

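/*
 * Attach a guest memory entry array as backing store for a resource.
 * The array is stored as vbuf->data_buf and freed when the buffer is
 * reclaimed, i.e. once the host has consumed the command.
 */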
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d\n", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled\n", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

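/*
 * EDID block read callback for drm_do_get_edid(): copy one block out
 * of the response buffer returned by the host.
 */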
static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	drm_connector_update_edid_property(&output->conn, output->edid);
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *resp_buf;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t resource_id, uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id, struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

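/*
 * Attach backing pages to a resource: map the sg table through the DMA
 * API when needed, build the mem entry array and hand it to the host.
 */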
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_fence *fence)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si, nents;

	if (WARN_ON_ONCE(!obj->created))
		return -EINVAL;

	if (!obj->pages) {
		int ret;

		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
		if (ret)
			return ret;
	}

	if (use_dma_api) {
		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					 obj->pages->sgl, obj->pages->nents,
					 DMA_TO_DEVICE);
		nents = obj->mapped;
	} else {
		nents = obj->pages->nents;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, nents, si) {
		ents[si].addr = cpu_to_le64(use_dma_api
					    ? sg_dma_address(sg)
					    : sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents,
					       fence);
	return 0;
}

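/*
 * Detach backing pages.  When DMA mappings exist, fence the detach and
 * wait for the host to finish before the pages are unmapped.
 */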
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api && obj->mapped) {
		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);

		/* detach backing and wait for the host to process it ... */
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
		dma_fence_wait(&fence->f, true);
		dma_fence_put(&fence->f);

		/* ... then tear down iommu mappings */
		dma_unmap_sg(vgdev->vdev->dev.parent,
			     obj->pages->sgl, obj->mapped,
			     DMA_TO_DEVICE);
		obj->mapped = 0;
	} else {
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
	}
}

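/* Push the current cursor state for one output to the cursor queue. */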
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}