/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include "virtgpu_trace.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

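/*
 * Virtqueue callbacks, invoked from the virtio interrupt path when the
 * host has used buffers.  They only schedule the per-queue dequeue work;
 * the actual reclaim happens in virtio_gpu_dequeue_ctrl_func() and
 * virtio_gpu_dequeue_cursor_func() below.
 */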
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

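/*
 * Allocate a vbuffer from the slab cache.  The command buffer and small
 * responses live inline right after the vbuffer struct, which is why
 * VBUFFER_SIZE adds MAX_INLINE_CMD_SIZE and MAX_INLINE_RESP_SIZE; larger
 * responses must be passed in via resp_buf.
 */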
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

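/*
 * Wrappers around virtio_gpu_get_vbuf() for the common cases: a plain
 * command with a ctrl_hdr-sized response and no callback, a cursor
 * command with no response at all, and a command with a caller-supplied
 * response buffer plus completion callback.
 */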
static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

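/*
 * Pull all completed buffers off the virtqueue and collect them on
 * reclaim_list.  Called with the queue lock held; the caller frees the
 * entries after dropping the lock.
 */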
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

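/*
 * Control queue dequeue work: drain completed vbuffers, log error
 * responses, remember the highest fence id seen, run per-command
 * response callbacks, free the buffers and finally signal all fences
 * up to that id.
 */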
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
				DRM_ERROR("response 0x%x (command 0x%x)\n",
					  le32_to_cpu(resp->type),
					  le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);

		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);
}

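/*
 * Cursor queue dequeue work: cursor commands carry no response buffer
 * or fence, so completed vbuffers are simply freed.
 */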
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

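/*
 * Add one vbuffer to the control virtqueue: the command header, an
 * optional data payload (out) and an optional response buffer (in).
 * Must be called with ctrlq.qlock held; the lock is dropped and re-taken
 * while waiting for free descriptors, hence the __releases/__acquires
 * annotations.
 */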
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
		__releases(&vgdev->ctrlq.qlock)
		__acquires(&vgdev->ctrlq.qlock)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	int outcnt = 0, incnt = 0;
	int ret;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vbuf->data_size) {
		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
		sgs[outcnt + incnt] = &vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);

		virtqueue_kick(vq);
	}

	if (!ret)
		ret = vq->num_free;
	return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	int rc;

	spin_lock(&vgdev->ctrlq.qlock);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

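/*
 * Queue a command that may carry a fence.  At least three descriptors
 * (the maximum a control command can use) are reserved before the fence
 * id is emitted under ctrlq.qlock, so fence ids reach the ring in order;
 * see the comment in the function body.
 */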
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_ctrl_hdr *hdr,
					       struct virtio_gpu_fence *fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int rc;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue.  If not,
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 3) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (fence)
		virtio_gpu_fence_emit(vgdev, hdr, fence);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

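/*
 * Queue a cursor command.  The cursor queue takes a single outgoing
 * descriptor per command and no response.
 */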
static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);

		virtqueue_kick(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (!ret)
		ret = vq->num_free;
	return ret;
}

/* Just create gem objects for userspace and long-lived objects;
 * just use dma_alloc'ed pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
						  uint32_t resource_id,
						  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = width;
	cmd_p->r.height = height;
	cmd_p->r.x = x;
	cmd_p->r.y = y;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

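/*
 * Response callbacks below run from the control queue dequeue work and
 * copy the host's answer into driver state before waking up waiters on
 * resp_wq.
 */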
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	drm_connector_update_edid_property(&output->conn, output->edid);
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

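/*
 * Request one capability set from the host.  A cache entry is allocated
 * up front and only added to cap_cache if no other task has already
 * requested the same id/version; virtio_gpu_cmd_capset_cb() copies the
 * data into the entry and marks it valid when the response arrives.
 */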
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t resource_id, uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id, struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

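/*
 * Attach the object's backing pages to its host resource: map the sg
 * table through the DMA API when use_dma_api is set (otherwise use
 * physical addresses directly), translate it into an array of
 * virtio_gpu_mem_entry, and send RESOURCE_ATTACH_BACKING.  The entry
 * array becomes the vbuffer's data_buf and is freed by free_vbuf()
 * once the host has consumed the command.
 */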
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_fence *fence)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si, nents;

	if (WARN_ON_ONCE(!obj->created))
		return -EINVAL;

	if (!obj->pages) {
		int ret;

		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
		if (ret)
			return ret;
	}

	if (use_dma_api) {
		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					 obj->pages->sgl, obj->pages->nents,
					 DMA_TO_DEVICE);
		nents = obj->mapped;
	} else {
		nents = obj->pages->nents;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, nents, si) {
		ents[si].addr = cpu_to_le64(use_dma_api
					    ? sg_dma_address(sg)
					    : sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents,
					       fence);
	return 0;
}

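/*
 * Detach backing storage from a host resource.  When the pages were
 * mapped through the DMA API, the detach command is fenced and waited
 * for, so the host is guaranteed to be done with the pages before the
 * DMA mappings are torn down.
 */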
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api && obj->mapped) {
		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
		/* detach backing and wait for the host to process it ... */
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
		dma_fence_wait(&fence->f, true);
		dma_fence_put(&fence->f);

		/* ... then tear down iommu mappings */
		dma_unmap_sg(vgdev->vdev->dev.parent,
			     obj->pages->sgl, obj->mapped,
			     DMA_TO_DEVICE);
		obj->mapped = 0;
	} else {
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
	}
}

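/* Push the cached cursor state for one output to the host via the cursor queue. */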
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}