xref: /openbmc/linux/drivers/gpu/drm/virtio/virtgpu_vq.c (revision 05cf4fe738242183f1237f1b3a28b4479348c0a1)
1 /*
2  * Copyright (C) 2015 Red Hat, Inc.
3  * All Rights Reserved.
4  *
5  * Authors:
6  *    Dave Airlie <airlied@redhat.com>
7  *    Gerd Hoffmann <kraxel@redhat.com>
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26  * OTHER DEALINGS IN THE SOFTWARE.
27  */
28 
29 #include <drm/drmP.h>
30 #include "virtgpu_drv.h"
31 #include <linux/virtio.h>
32 #include <linux/virtio_config.h>
33 #include <linux/virtio_ring.h>
34 
35 #define MAX_INLINE_CMD_SIZE   96
36 #define MAX_INLINE_RESP_SIZE  24
37 #define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
38 			       + MAX_INLINE_CMD_SIZE		 \
39 			       + MAX_INLINE_RESP_SIZE)
40 
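/*
 * Allocate a resource id from the device's IDR, starting at 1.  idr_preload()
 * is used so the GFP_NOWAIT allocation under the spinlock can be backed by a
 * preallocated node; the handle returned by idr_alloc() is stored in *resid
 * as-is.
 */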
41 void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
42 				uint32_t *resid)
43 {
44 	int handle;
45 
46 	idr_preload(GFP_KERNEL);
47 	spin_lock(&vgdev->resource_idr_lock);
48 	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
49 	spin_unlock(&vgdev->resource_idr_lock);
50 	idr_preload_end();
51 	*resid = handle;
52 }
53 
54 void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
55 {
56 	spin_lock(&vgdev->resource_idr_lock);
57 	idr_remove(&vgdev->resource_idr, id);
58 	spin_unlock(&vgdev->resource_idr_lock);
59 }
60 
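/*
 * Completion callbacks for the control and cursor virtqueues.  They just
 * schedule the corresponding dequeue work item, so the actual buffer reclaim
 * runs from process context.
 */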
61 void virtio_gpu_ctrl_ack(struct virtqueue *vq)
62 {
63 	struct drm_device *dev = vq->vdev->priv;
64 	struct virtio_gpu_device *vgdev = dev->dev_private;
65 
66 	schedule_work(&vgdev->ctrlq.dequeue_work);
67 }
68 
69 void virtio_gpu_cursor_ack(struct virtqueue *vq)
70 {
71 	struct drm_device *dev = vq->vdev->priv;
72 	struct virtio_gpu_device *vgdev = dev->dev_private;
73 
74 	schedule_work(&vgdev->cursorq.dequeue_work);
75 }
76 
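/*
 * Create the slab cache used for command buffers.  Each object is large
 * enough for the vbuffer header plus the inline command and response areas
 * (VBUFFER_SIZE).
 */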
77 int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
78 {
79 	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
80 					 VBUFFER_SIZE,
81 					 __alignof__(struct virtio_gpu_vbuffer),
82 					 0, NULL);
83 	if (!vgdev->vbufs)
84 		return -ENOMEM;
85 	return 0;
86 }
87 
88 void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
89 {
90 	kmem_cache_destroy(vgdev->vbufs);
91 	vgdev->vbufs = NULL;
92 }
93 
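/*
 * Allocate a vbuffer from the cache.  The command payload always lives
 * inline, directly behind the struct (hence the MAX_INLINE_CMD_SIZE check).
 * Responses up to MAX_INLINE_RESP_SIZE use the inline area as well; larger
 * responses use the caller-supplied resp_buf.
 */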
94 static struct virtio_gpu_vbuffer*
95 virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
96 		    int size, int resp_size, void *resp_buf,
97 		    virtio_gpu_resp_cb resp_cb)
98 {
99 	struct virtio_gpu_vbuffer *vbuf;
100 
101 	vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
102 	if (!vbuf)
103 		return ERR_PTR(-ENOMEM);
104 	memset(vbuf, 0, VBUFFER_SIZE);
105 
106 	BUG_ON(size > MAX_INLINE_CMD_SIZE);
107 	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
108 	vbuf->size = size;
109 
110 	vbuf->resp_cb = resp_cb;
111 	vbuf->resp_size = resp_size;
112 	if (resp_size <= MAX_INLINE_RESP_SIZE)
113 		vbuf->resp_buf = (void *)vbuf->buf + size;
114 	else
115 		vbuf->resp_buf = resp_buf;
116 	BUG_ON(!vbuf->resp_buf);
117 	return vbuf;
118 }
119 
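/*
 * Convenience wrapper for commands that need no response callback: the
 * response is just the generic ctrl header, stored inline.
 */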
120 static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
121 				  struct virtio_gpu_vbuffer **vbuffer_p,
122 				  int size)
123 {
124 	struct virtio_gpu_vbuffer *vbuf;
125 
126 	vbuf = virtio_gpu_get_vbuf(vgdev, size,
127 				   sizeof(struct virtio_gpu_ctrl_hdr),
128 				   NULL, NULL);
129 	if (IS_ERR(vbuf)) {
130 		*vbuffer_p = NULL;
131 		return ERR_CAST(vbuf);
132 	}
133 	*vbuffer_p = vbuf;
134 	return vbuf->buf;
135 }
136 
137 static struct virtio_gpu_update_cursor*
138 virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
139 			struct virtio_gpu_vbuffer **vbuffer_p)
140 {
141 	struct virtio_gpu_vbuffer *vbuf;
142 
143 	vbuf = virtio_gpu_get_vbuf
144 		(vgdev, sizeof(struct virtio_gpu_update_cursor),
145 		 0, NULL, NULL);
146 	if (IS_ERR(vbuf)) {
147 		*vbuffer_p = NULL;
148 		return ERR_CAST(vbuf);
149 	}
150 	*vbuffer_p = vbuf;
151 	return (struct virtio_gpu_update_cursor *)vbuf->buf;
152 }
153 
154 static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
155 				       virtio_gpu_resp_cb cb,
156 				       struct virtio_gpu_vbuffer **vbuffer_p,
157 				       int cmd_size, int resp_size,
158 				       void *resp_buf)
159 {
160 	struct virtio_gpu_vbuffer *vbuf;
161 
162 	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
163 				   resp_size, resp_buf, cb);
164 	if (IS_ERR(vbuf)) {
165 		*vbuffer_p = NULL;
166 		return ERR_CAST(vbuf);
167 	}
168 	*vbuffer_p = vbuf;
169 	return (struct virtio_gpu_command *)vbuf->buf;
170 }
171 
172 static void free_vbuf(struct virtio_gpu_device *vgdev,
173 		      struct virtio_gpu_vbuffer *vbuf)
174 {
175 	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
176 		kfree(vbuf->resp_buf);
177 	kfree(vbuf->data_buf);
178 	kmem_cache_free(vgdev->vbufs, vbuf);
179 }
180 
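/*
 * Move all completed buffers from the virtqueue onto reclaim_list.  Called
 * with the queue lock held.
 */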
181 static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
182 {
183 	struct virtio_gpu_vbuffer *vbuf;
184 	unsigned int len;
185 	int freed = 0;
186 
187 	while ((vbuf = virtqueue_get_buf(vq, &len))) {
188 		list_add_tail(&vbuf->list, reclaim_list);
189 		freed++;
190 	}
191 	if (freed == 0)
192 		DRM_DEBUG("Huh? zero vbufs reclaimed\n");
193 }
194 
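/*
 * Work handler for the control queue.  Completed buffers are collected under
 * the queue lock (looping until callbacks can be re-enabled without racing),
 * then processed outside the lock: non-OK responses are logged, the highest
 * completed fence id is tracked, per-buffer response callbacks run, and the
 * buffers are freed.  Finally waiters are woken and
 * virtio_gpu_fence_event_process() is told about the highest fence id seen.
 */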
195 void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
196 {
197 	struct virtio_gpu_device *vgdev =
198 		container_of(work, struct virtio_gpu_device,
199 			     ctrlq.dequeue_work);
200 	struct list_head reclaim_list;
201 	struct virtio_gpu_vbuffer *entry, *tmp;
202 	struct virtio_gpu_ctrl_hdr *resp;
203 	u64 fence_id = 0;
204 
205 	INIT_LIST_HEAD(&reclaim_list);
206 	spin_lock(&vgdev->ctrlq.qlock);
207 	do {
208 		virtqueue_disable_cb(vgdev->ctrlq.vq);
209 		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
210 
211 	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
212 	spin_unlock(&vgdev->ctrlq.qlock);
213 
214 	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
215 		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
216 		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
217 			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
218 		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
219 			u64 f = le64_to_cpu(resp->fence_id);
220 
221 			if (fence_id > f) {
222 				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
223 					  __func__, fence_id, f);
224 			} else {
225 				fence_id = f;
226 			}
227 		}
228 		if (entry->resp_cb)
229 			entry->resp_cb(vgdev, entry);
230 
231 		list_del(&entry->list);
232 		free_vbuf(vgdev, entry);
233 	}
234 	wake_up(&vgdev->ctrlq.ack_queue);
235 
236 	if (fence_id)
237 		virtio_gpu_fence_event_process(vgdev, fence_id);
238 }
239 
240 void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
241 {
242 	struct virtio_gpu_device *vgdev =
243 		container_of(work, struct virtio_gpu_device,
244 			     cursorq.dequeue_work);
245 	struct list_head reclaim_list;
246 	struct virtio_gpu_vbuffer *entry, *tmp;
247 
248 	INIT_LIST_HEAD(&reclaim_list);
249 	spin_lock(&vgdev->cursorq.qlock);
250 	do {
251 		virtqueue_disable_cb(vgdev->cursorq.vq);
252 		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
253 	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
254 	spin_unlock(&vgdev->cursorq.qlock);
255 
256 	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
257 		list_del(&entry->list);
258 		free_vbuf(vgdev, entry);
259 	}
260 	wake_up(&vgdev->cursorq.ack_queue);
261 }
262 
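/*
 * Add a buffer to the control virtqueue: up to two out entries (the command
 * and, if present, data_buf) plus one in entry for the response.  If the ring
 * is full the queue lock is dropped while waiting for space (hence the
 * __releases/__acquires annotations).  Returns a negative errno on failure,
 * otherwise the number of free ring entries.
 */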
263 static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
264 					       struct virtio_gpu_vbuffer *vbuf)
265 		__releases(&vgdev->ctrlq.qlock)
266 		__acquires(&vgdev->ctrlq.qlock)
267 {
268 	struct virtqueue *vq = vgdev->ctrlq.vq;
269 	struct scatterlist *sgs[3], vcmd, vout, vresp;
270 	int outcnt = 0, incnt = 0;
271 	int ret;
272 
273 	if (!vgdev->vqs_ready)
274 		return -ENODEV;
275 
276 	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
277 	sgs[outcnt + incnt] = &vcmd;
278 	outcnt++;
279 
280 	if (vbuf->data_size) {
281 		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
282 		sgs[outcnt + incnt] = &vout;
283 		outcnt++;
284 	}
285 
286 	if (vbuf->resp_size) {
287 		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
288 		sgs[outcnt + incnt] = &vresp;
289 		incnt++;
290 	}
291 
292 retry:
293 	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
294 	if (ret == -ENOSPC) {
295 		spin_unlock(&vgdev->ctrlq.qlock);
296 		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
297 		spin_lock(&vgdev->ctrlq.qlock);
298 		goto retry;
299 	} else {
300 		virtqueue_kick(vq);
301 	}
302 
303 	if (!ret)
304 		ret = vq->num_free;
305 	return ret;
306 }
307 
308 static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
309 					struct virtio_gpu_vbuffer *vbuf)
310 {
311 	int rc;
312 
313 	spin_lock(&vgdev->ctrlq.qlock);
314 	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
315 	spin_unlock(&vgdev->ctrlq.qlock);
316 	return rc;
317 }
318 
319 static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
320 					       struct virtio_gpu_vbuffer *vbuf,
321 					       struct virtio_gpu_ctrl_hdr *hdr,
322 					       struct virtio_gpu_fence **fence)
323 {
324 	struct virtqueue *vq = vgdev->ctrlq.vq;
325 	int rc;
326 
327 again:
328 	spin_lock(&vgdev->ctrlq.qlock);
329 
330 	/*
331 	 * Make sure we have enough space in the virtqueue.  If not,
332 	 * wait here until we do.
333 	 *
334 	 * Without this check virtio_gpu_queue_ctrl_buffer_locked() might
335 	 * have to wait for free space, which can result in fence ids
336 	 * being submitted out-of-order.
337 	 */
338 	if (vq->num_free < 3) {
339 		spin_unlock(&vgdev->ctrlq.qlock);
340 		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
341 		goto again;
342 	}
343 
344 	if (fence)
345 		virtio_gpu_fence_emit(vgdev, hdr, fence);
346 	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
347 	spin_unlock(&vgdev->ctrlq.qlock);
348 	return rc;
349 }
350 
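/*
 * Cursor commands carry no data or response buffer, so a single out entry is
 * enough.  Like the control path, this waits for ring space on -ENOSPC
 * instead of failing.
 */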
351 static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
352 				   struct virtio_gpu_vbuffer *vbuf)
353 {
354 	struct virtqueue *vq = vgdev->cursorq.vq;
355 	struct scatterlist *sgs[1], ccmd;
356 	int ret;
357 	int outcnt;
358 
359 	if (!vgdev->vqs_ready)
360 		return -ENODEV;
361 
362 	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
363 	sgs[0] = &ccmd;
364 	outcnt = 1;
365 
366 	spin_lock(&vgdev->cursorq.qlock);
367 retry:
368 	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
369 	if (ret == -ENOSPC) {
370 		spin_unlock(&vgdev->cursorq.qlock);
371 		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
372 		spin_lock(&vgdev->cursorq.qlock);
373 		goto retry;
374 	} else {
375 		virtqueue_kick(vq);
376 	}
377 
378 	spin_unlock(&vgdev->cursorq.qlock);
379 
380 	if (!ret)
381 		ret = vq->num_free;
382 	return ret;
383 }
384 
385 /* Just create gem objects for userspace and long-lived objects;
386  * could the queue objects simply use dma_alloc'ed pages?
387  */
388 
389 /* create a basic resource */
390 void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
391 				    uint32_t resource_id,
392 				    uint32_t format,
393 				    uint32_t width,
394 				    uint32_t height)
395 {
396 	struct virtio_gpu_resource_create_2d *cmd_p;
397 	struct virtio_gpu_vbuffer *vbuf;
398 
399 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
400 	memset(cmd_p, 0, sizeof(*cmd_p));
401 
402 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
403 	cmd_p->resource_id = cpu_to_le32(resource_id);
404 	cmd_p->format = cpu_to_le32(format);
405 	cmd_p->width = cpu_to_le32(width);
406 	cmd_p->height = cpu_to_le32(height);
407 
408 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
409 }
410 
411 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
412 				   uint32_t resource_id)
413 {
414 	struct virtio_gpu_resource_unref *cmd_p;
415 	struct virtio_gpu_vbuffer *vbuf;
416 
417 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
418 	memset(cmd_p, 0, sizeof(*cmd_p));
419 
420 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
421 	cmd_p->resource_id = cpu_to_le32(resource_id);
422 
423 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
424 }
425 
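/*
 * Ask the host to detach the backing pages from a resource.  When a fence
 * pointer is passed the command is fenced, so callers can wait for the host
 * to finish before the pages are reused or unmapped.
 */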
426 static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
427 						  uint32_t resource_id,
428 						  struct virtio_gpu_fence **fence)
429 {
430 	struct virtio_gpu_resource_detach_backing *cmd_p;
431 	struct virtio_gpu_vbuffer *vbuf;
432 
433 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
434 	memset(cmd_p, 0, sizeof(*cmd_p));
435 
436 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
437 	cmd_p->resource_id = cpu_to_le32(resource_id);
438 
439 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
440 }
441 
442 void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
443 				uint32_t scanout_id, uint32_t resource_id,
444 				uint32_t width, uint32_t height,
445 				uint32_t x, uint32_t y)
446 {
447 	struct virtio_gpu_set_scanout *cmd_p;
448 	struct virtio_gpu_vbuffer *vbuf;
449 
450 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
451 	memset(cmd_p, 0, sizeof(*cmd_p));
452 
453 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
454 	cmd_p->resource_id = cpu_to_le32(resource_id);
455 	cmd_p->scanout_id = cpu_to_le32(scanout_id);
456 	cmd_p->r.width = cpu_to_le32(width);
457 	cmd_p->r.height = cpu_to_le32(height);
458 	cmd_p->r.x = cpu_to_le32(x);
459 	cmd_p->r.y = cpu_to_le32(y);
460 
461 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
462 }
463 
464 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
465 				   uint32_t resource_id,
466 				   uint32_t x, uint32_t y,
467 				   uint32_t width, uint32_t height)
468 {
469 	struct virtio_gpu_resource_flush *cmd_p;
470 	struct virtio_gpu_vbuffer *vbuf;
471 
472 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
473 	memset(cmd_p, 0, sizeof(*cmd_p));
474 
475 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
476 	cmd_p->resource_id = cpu_to_le32(resource_id);
477 	cmd_p->r.width = cpu_to_le32(width);
478 	cmd_p->r.height = cpu_to_le32(height);
479 	cmd_p->r.x = cpu_to_le32(x);
480 	cmd_p->r.y = cpu_to_le32(y);
481 
482 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
483 }
484 
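/*
 * Queue a 2D transfer from guest memory into the host resource.  When the
 * transport has no IOMMU quirk the DMA API is in use, so the backing pages
 * are synced for the device before the command is submitted.
 */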
485 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
486 					struct virtio_gpu_object *bo,
487 					uint64_t offset,
488 					__le32 width, __le32 height,
489 					__le32 x, __le32 y,
490 					struct virtio_gpu_fence **fence)
491 {
492 	struct virtio_gpu_transfer_to_host_2d *cmd_p;
493 	struct virtio_gpu_vbuffer *vbuf;
494 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
495 
496 	if (use_dma_api)
497 		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
498 				       bo->pages->sgl, bo->pages->nents,
499 				       DMA_TO_DEVICE);
500 
501 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
502 	memset(cmd_p, 0, sizeof(*cmd_p));
503 
504 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
505 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
506 	cmd_p->offset = cpu_to_le64(offset);
507 	cmd_p->r.width = width;
508 	cmd_p->r.height = height;
509 	cmd_p->r.x = x;
510 	cmd_p->r.y = y;
511 
512 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
513 }
514 
515 static void
516 virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
517 				       uint32_t resource_id,
518 				       struct virtio_gpu_mem_entry *ents,
519 				       uint32_t nents,
520 				       struct virtio_gpu_fence **fence)
521 {
522 	struct virtio_gpu_resource_attach_backing *cmd_p;
523 	struct virtio_gpu_vbuffer *vbuf;
524 
525 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
526 	memset(cmd_p, 0, sizeof(*cmd_p));
527 
528 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
529 	cmd_p->resource_id = cpu_to_le32(resource_id);
530 	cmd_p->nr_entries = cpu_to_le32(nents);
531 
532 	vbuf->data_buf = ents;
533 	vbuf->data_size = sizeof(*ents) * nents;
534 
535 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
536 }
537 
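/*
 * Response callback for VIRTIO_GPU_CMD_GET_DISPLAY_INFO.  The per-scanout
 * modes are copied under display_info_lock, waiters on resp_wq are woken,
 * and a hotplug event is generated so userspace re-probes the outputs.
 */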
538 static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
539 					       struct virtio_gpu_vbuffer *vbuf)
540 {
541 	struct virtio_gpu_resp_display_info *resp =
542 		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
543 	int i;
544 
545 	spin_lock(&vgdev->display_info_lock);
546 	for (i = 0; i < vgdev->num_scanouts; i++) {
547 		vgdev->outputs[i].info = resp->pmodes[i];
548 		if (resp->pmodes[i].enabled) {
549 			DRM_DEBUG("output %d: %dx%d+%d+%d\n", i,
550 				  le32_to_cpu(resp->pmodes[i].r.width),
551 				  le32_to_cpu(resp->pmodes[i].r.height),
552 				  le32_to_cpu(resp->pmodes[i].r.x),
553 				  le32_to_cpu(resp->pmodes[i].r.y));
554 		} else {
555 			DRM_DEBUG("output %d: disabled\n", i);
556 		}
557 	}
558 
559 	vgdev->display_info_pending = false;
560 	spin_unlock(&vgdev->display_info_lock);
561 	wake_up(&vgdev->resp_wq);
562 
563 	if (!drm_helper_hpd_irq_event(vgdev->ddev))
564 		drm_kms_helper_hotplug_event(vgdev->ddev);
565 }
566 
567 static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
568 					      struct virtio_gpu_vbuffer *vbuf)
569 {
570 	struct virtio_gpu_get_capset_info *cmd =
571 		(struct virtio_gpu_get_capset_info *)vbuf->buf;
572 	struct virtio_gpu_resp_capset_info *resp =
573 		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
574 	int i = le32_to_cpu(cmd->capset_index);
575 
576 	spin_lock(&vgdev->display_info_lock);
577 	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
578 	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
579 	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
580 	spin_unlock(&vgdev->display_info_lock);
581 	wake_up(&vgdev->resp_wq);
582 }
583 
584 static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
585 				     struct virtio_gpu_vbuffer *vbuf)
586 {
587 	struct virtio_gpu_get_capset *cmd =
588 		(struct virtio_gpu_get_capset *)vbuf->buf;
589 	struct virtio_gpu_resp_capset *resp =
590 		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
591 	struct virtio_gpu_drv_cap_cache *cache_ent;
592 
593 	spin_lock(&vgdev->display_info_lock);
594 	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
595 		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
596 		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
597 			memcpy(cache_ent->caps_cache, resp->capset_data,
598 			       cache_ent->size);
599 			atomic_set(&cache_ent->is_valid, 1);
600 			break;
601 		}
602 	}
603 	spin_unlock(&vgdev->display_info_lock);
604 	wake_up(&vgdev->resp_wq);
605 }
606 
607 int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
608 {
609 	struct virtio_gpu_ctrl_hdr *cmd_p;
610 	struct virtio_gpu_vbuffer *vbuf;
611 	void *resp_buf;
612 
613 	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
614 			   GFP_KERNEL);
615 	if (!resp_buf)
616 		return -ENOMEM;
617 
618 	cmd_p = virtio_gpu_alloc_cmd_resp
619 		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
620 		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
621 		 resp_buf);
622 	memset(cmd_p, 0, sizeof(*cmd_p));
623 
624 	vgdev->display_info_pending = true;
625 	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
626 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
627 	return 0;
628 }
629 
630 int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
631 {
632 	struct virtio_gpu_get_capset_info *cmd_p;
633 	struct virtio_gpu_vbuffer *vbuf;
634 	void *resp_buf;
635 
636 	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
637 			   GFP_KERNEL);
638 	if (!resp_buf)
639 		return -ENOMEM;
640 
641 	cmd_p = virtio_gpu_alloc_cmd_resp
642 		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
643 		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
644 		 resp_buf);
645 	memset(cmd_p, 0, sizeof(*cmd_p));
646 
647 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
648 	cmd_p->capset_index = cpu_to_le32(idx);
649 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
650 	return 0;
651 }
652 
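/*
 * Fetch a capability set from the host.  A cache entry and a response buffer
 * sized for the capset are allocated up front and the entry is linked into
 * cap_cache; virtio_gpu_cmd_capset_cb() fills the cache and marks it valid
 * once the response arrives.  The new entry is returned via *cache_p.
 */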
653 int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
654 			      int idx, int version,
655 			      struct virtio_gpu_drv_cap_cache **cache_p)
656 {
657 	struct virtio_gpu_get_capset *cmd_p;
658 	struct virtio_gpu_vbuffer *vbuf;
659 	int max_size;
660 	struct virtio_gpu_drv_cap_cache *cache_ent;
661 	void *resp_buf;
662 
663 	if (idx >= vgdev->num_capsets)
664 		return -EINVAL;
665 
666 	if (version > vgdev->capsets[idx].max_version)
667 		return -EINVAL;
668 
669 	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
670 	if (!cache_ent)
671 		return -ENOMEM;
672 
673 	max_size = vgdev->capsets[idx].max_size;
674 	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
675 	if (!cache_ent->caps_cache) {
676 		kfree(cache_ent);
677 		return -ENOMEM;
678 	}
679 
680 	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
681 			   GFP_KERNEL);
682 	if (!resp_buf) {
683 		kfree(cache_ent->caps_cache);
684 		kfree(cache_ent);
685 		return -ENOMEM;
686 	}
687 
688 	cache_ent->version = version;
689 	cache_ent->id = vgdev->capsets[idx].id;
690 	atomic_set(&cache_ent->is_valid, 0);
691 	cache_ent->size = max_size;
692 	spin_lock(&vgdev->display_info_lock);
693 	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
694 	spin_unlock(&vgdev->display_info_lock);
695 
696 	cmd_p = virtio_gpu_alloc_cmd_resp
697 		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
698 		 sizeof(struct virtio_gpu_resp_capset) + max_size,
699 		 resp_buf);
700 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
701 	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
702 	cmd_p->capset_version = cpu_to_le32(version);
703 	*cache_p = cache_ent;
704 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
705 
706 	return 0;
707 }
708 
709 void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
710 				   uint32_t nlen, const char *name)
711 {
712 	struct virtio_gpu_ctx_create *cmd_p;
713 	struct virtio_gpu_vbuffer *vbuf;
714 
715 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
716 	memset(cmd_p, 0, sizeof(*cmd_p));
717 
718 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
719 	cmd_p->hdr.ctx_id = cpu_to_le32(id);
720 	cmd_p->nlen = cpu_to_le32(nlen);
721 	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
722 	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
723 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
724 }
725 
726 void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
727 				    uint32_t id)
728 {
729 	struct virtio_gpu_ctx_destroy *cmd_p;
730 	struct virtio_gpu_vbuffer *vbuf;
731 
732 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
733 	memset(cmd_p, 0, sizeof(*cmd_p));
734 
735 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
736 	cmd_p->hdr.ctx_id = cpu_to_le32(id);
737 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
738 }
739 
740 void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
741 					    uint32_t ctx_id,
742 					    uint32_t resource_id)
743 {
744 	struct virtio_gpu_ctx_resource *cmd_p;
745 	struct virtio_gpu_vbuffer *vbuf;
746 
747 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
748 	memset(cmd_p, 0, sizeof(*cmd_p));
749 
750 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
751 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
752 	cmd_p->resource_id = cpu_to_le32(resource_id);
753 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
754 
755 }
756 
757 void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
758 					    uint32_t ctx_id,
759 					    uint32_t resource_id)
760 {
761 	struct virtio_gpu_ctx_resource *cmd_p;
762 	struct virtio_gpu_vbuffer *vbuf;
763 
764 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
765 	memset(cmd_p, 0, sizeof(*cmd_p));
766 
767 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
768 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
769 	cmd_p->resource_id = cpu_to_le32(resource_id);
770 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
771 }
772 
773 void
774 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
775 				  struct virtio_gpu_resource_create_3d *rc_3d,
776 				  struct virtio_gpu_fence **fence)
777 {
778 	struct virtio_gpu_resource_create_3d *cmd_p;
779 	struct virtio_gpu_vbuffer *vbuf;
780 
781 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
782 	memset(cmd_p, 0, sizeof(*cmd_p));
783 
784 	*cmd_p = *rc_3d;
785 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
786 	cmd_p->hdr.flags = 0;
787 
788 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
789 }
790 
791 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
792 					struct virtio_gpu_object *bo,
793 					uint32_t ctx_id,
794 					uint64_t offset, uint32_t level,
795 					struct virtio_gpu_box *box,
796 					struct virtio_gpu_fence **fence)
797 {
798 	struct virtio_gpu_transfer_host_3d *cmd_p;
799 	struct virtio_gpu_vbuffer *vbuf;
800 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
801 
802 	if (use_dma_api)
803 		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
804 				       bo->pages->sgl, bo->pages->nents,
805 				       DMA_TO_DEVICE);
806 
807 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
808 	memset(cmd_p, 0, sizeof(*cmd_p));
809 
810 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
811 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
812 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
813 	cmd_p->box = *box;
814 	cmd_p->offset = cpu_to_le64(offset);
815 	cmd_p->level = cpu_to_le32(level);
816 
817 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
818 }
819 
820 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
821 					  uint32_t resource_id, uint32_t ctx_id,
822 					  uint64_t offset, uint32_t level,
823 					  struct virtio_gpu_box *box,
824 					  struct virtio_gpu_fence **fence)
825 {
826 	struct virtio_gpu_transfer_host_3d *cmd_p;
827 	struct virtio_gpu_vbuffer *vbuf;
828 
829 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
830 	memset(cmd_p, 0, sizeof(*cmd_p));
831 
832 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
833 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
834 	cmd_p->resource_id = cpu_to_le32(resource_id);
835 	cmd_p->box = *box;
836 	cmd_p->offset = cpu_to_le64(offset);
837 	cmd_p->level = cpu_to_le32(level);
838 
839 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
840 }
841 
842 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
843 			   void *data, uint32_t data_size,
844 			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
845 {
846 	struct virtio_gpu_cmd_submit *cmd_p;
847 	struct virtio_gpu_vbuffer *vbuf;
848 
849 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
850 	memset(cmd_p, 0, sizeof(*cmd_p));
851 
852 	vbuf->data_buf = data;
853 	vbuf->data_size = data_size;
854 
855 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
856 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
857 	cmd_p->size = cpu_to_le32(data_size);
858 
859 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
860 }
861 
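/*
 * Attach backing pages to a host resource.  The object's sg table is created
 * on demand, DMA-mapped when the DMA API is in use, and translated into an
 * array of virtio_gpu_mem_entry structures that is handed to the
 * ATTACH_BACKING command (and freed along with the vbuffer once the ring has
 * consumed it).
 */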
862 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
863 			     struct virtio_gpu_object *obj,
864 			     uint32_t resource_id,
865 			     struct virtio_gpu_fence **fence)
866 {
867 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
868 	struct virtio_gpu_mem_entry *ents;
869 	struct scatterlist *sg;
870 	int si, nents;
871 
872 	if (!obj->pages) {
873 		int ret;
874 
875 		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
876 		if (ret)
877 			return ret;
878 	}
879 
880 	if (use_dma_api) {
881 		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
882 					 obj->pages->sgl, obj->pages->nents,
883 					 DMA_TO_DEVICE);
884 		nents = obj->mapped;
885 	} else {
886 		nents = obj->pages->nents;
887 	}
888 
889 	/* gets freed when the ring has consumed it */
890 	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
891 			     GFP_KERNEL);
892 	if (!ents) {
893 		DRM_ERROR("failed to allocate ent list\n");
894 		return -ENOMEM;
895 	}
896 
897 	for_each_sg(obj->pages->sgl, sg, nents, si) {
898 		ents[si].addr = cpu_to_le64(use_dma_api
899 					    ? sg_dma_address(sg)
900 					    : sg_phys(sg));
901 		ents[si].length = cpu_to_le32(sg->length);
902 		ents[si].padding = 0;
903 	}
904 
905 	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
906 					       ents, nents,
907 					       fence);
908 	obj->hw_res_handle = resource_id;
909 	return 0;
910 }
911 
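/*
 * Detach the backing pages from a host resource.  With the DMA API in use the
 * detach is fenced and waited for, so the host is done with the pages before
 * the DMA mapping is torn down.
 */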
912 void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
913 			      struct virtio_gpu_object *obj)
914 {
915 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
916 	struct virtio_gpu_fence *fence;
917 
918 	if (use_dma_api && obj->mapped) {
919 		/* detach backing and wait for the host to process it ... */
920 		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
921 		dma_fence_wait(&fence->f, true);
922 		dma_fence_put(&fence->f);
923 
924 		/* ... then tear down iommu mappings */
925 		dma_unmap_sg(vgdev->vdev->dev.parent,
926 			     obj->pages->sgl, obj->mapped,
927 			     DMA_TO_DEVICE);
928 		obj->mapped = 0;
929 	} else {
930 		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
931 	}
932 }
933 
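/*
 * Push the current cursor state of an output to the host: the cached
 * update_cursor command is copied into a fresh vbuffer and queued on the
 * cursor virtqueue.
 */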
934 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
935 			    struct virtio_gpu_output *output)
936 {
937 	struct virtio_gpu_vbuffer *vbuf;
938 	struct virtio_gpu_update_cursor *cur_p;
939 
940 	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
941 	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
942 	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
943 	virtio_gpu_queue_cursor(vgdev, vbuf);
944 }
945