/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

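/*
 * Each virtio-gpu command in flight is tracked by a vbuffer.  The
 * vbuffer, its inline command and its inline response are carved out
 * of a single slab allocation:
 *
 *   +-----------------------------+
 *   | struct virtio_gpu_vbuffer   |
 *   +-----------------------------+
 *   | inline command  (96 bytes)  |
 *   +-----------------------------+
 *   | inline response (24 bytes)  |
 *   +-----------------------------+
 *
 * Responses larger than MAX_INLINE_RESP_SIZE go into a separately
 * allocated resp_buf supplied by the caller.
 */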
#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

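/*
 * Virtqueue interrupt callbacks.  These run in atomic context, so they
 * only kick the dequeue workers; the actual response processing happens
 * in virtio_gpu_dequeue_ctrl_func() / virtio_gpu_dequeue_cursor_func().
 */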
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

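/*
 * Allocate a vbuffer from the slab cache.  The command always lives in
 * the inline area; the response is placed inline when it fits, otherwise
 * the caller-provided resp_buf is used.
 */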
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

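/* Move all completed vbuffers off the virtqueue; called with qlock held. */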
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed\n");
}

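/*
 * Work handler for the control queue.  Drains completed buffers with
 * callbacks disabled (re-checking after re-enabling them, so no
 * completion is missed), logs error responses, tracks the highest
 * signalled fence id, and finally runs per-command response callbacks
 * and frees the vbuffers.
 */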
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
				DRM_ERROR("response 0x%x (command 0x%x)\n",
					  le32_to_cpu(resp->type),
					  le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

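	/*
	 * vmalloc memory is virtually contiguous but physically
	 * page-granular, so one scatterlist entry is needed per page.
	 */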
	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sg(sgt->sgl, sg, *sg_ents, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}

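/*
 * Add one command to the control virtqueue.  The descriptor layout is
 * up to three scatterlist entries: the command (out), optional payload
 * data (out) and an optional response buffer (in).  On -ENOSPC the
 * qlock is dropped while waiting for the host to make room.  Returns
 * true if the caller should notify (kick) the host.
 */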
static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf,
						struct scatterlist *vout)
		__releases(&vgdev->ctrlq.qlock)
		__acquires(&vgdev->ctrlq.qlock)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vresp;
	int outcnt = 0, incnt = 0;
	bool notify = false;
	int ret;

	if (!vgdev->vqs_ready)
		return notify;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vout) {
		sgs[outcnt + incnt] = vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);

		notify = virtqueue_kick_prepare(vq);
	}
	return notify;
}

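/*
 * Queue a command, optionally emitting a fence for it.  The fence is
 * emitted while the qlock is held so that fence ids reach the virtqueue
 * in order.  When the payload lives in vmalloc memory it is first
 * converted to a scatterlist via vmalloc_to_sgt().
 */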
static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf,
						struct virtio_gpu_ctrl_hdr *hdr,
						struct virtio_gpu_fence *fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *vout = NULL, sg;
	struct sg_table *sgt = NULL;
	bool notify;
	int outcnt = 0;

	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &outcnt);
			if (!sgt)
				return;
			vout = sgt->sgl;
		} else {
			sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
			vout = &sg;
			outcnt = 1;
		}
	}

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue.  If not,
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 2 + outcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (hdr && fence) {
		virtio_gpu_fence_emit(vgdev, hdr, fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}
	notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
	spin_unlock(&vgdev->ctrlq.qlock);
	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					 struct virtio_gpu_vbuffer *vbuf)
{
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
}

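/*
 * The cursor queue carries only fixed-size virtio_gpu_update_cursor
 * commands with no response, so a single out-entry is enough.
 */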
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	bool notify;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);
}

/* Just create gem objects for userspace and long-lived objects;
 * just use dma_alloc'ed pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
						  uint32_t resource_id,
						  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

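/*
 * Response callback for VIRTIO_GPU_CMD_GET_DISPLAY_INFO.  Updates the
 * per-scanout mode info under display_info_lock and signals a hotplug
 * event so userspace re-probes the outputs.
 */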
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d\n", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled\n", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

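/*
 * Block-fetch callback for drm_do_get_edid(): copy one EDID block out
 * of the response buffer the host filled in.
 */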
static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

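/*
 * Fetch a capability set from the host.  A cache entry is pre-allocated
 * and inserted under display_info_lock; if another task raced us and
 * already added a matching entry, the new allocations are dropped and
 * the existing entry is returned instead.
 */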
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

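/*
 * Pin the shmem pages of @obj, build a virtio_gpu_mem_entry array from
 * its scatterlist (DMA-mapped when the DMA API is in use), and attach
 * it as backing store for the host resource.  The entry array is freed
 * once the ring has consumed the command.
 */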
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_fence *fence)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si, nents, ret;

	if (WARN_ON_ONCE(!obj->created))
		return -EINVAL;
	if (WARN_ON_ONCE(obj->pages))
		return -EINVAL;

	ret = drm_gem_shmem_pin(&obj->base.base);
	if (ret < 0)
		return -EINVAL;

	obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
	if (obj->pages == NULL) {
		drm_gem_shmem_unpin(&obj->base.base);
		return -EINVAL;
	}

	if (use_dma_api) {
		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					 obj->pages->sgl, obj->pages->nents,
					 DMA_TO_DEVICE);
		nents = obj->mapped;
	} else {
		nents = obj->pages->nents;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, nents, si) {
		ents[si].addr = cpu_to_le64(use_dma_api
					    ? sg_dma_address(sg)
					    : sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents,
					       fence);
	return 0;
}

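/*
 * Detach the backing store again.  With the DMA API in use the host
 * must acknowledge the detach (via a fence) before the IOMMU mappings
 * can be torn down safely.
 */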
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (WARN_ON_ONCE(!obj->pages))
		return;

	if (use_dma_api && obj->mapped) {
		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
		/* detach backing and wait for the host to process it ... */
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
		dma_fence_wait(&fence->f, true);
		dma_fence_put(&fence->f);

		/* ... then tear down iommu mappings */
		dma_unmap_sg(vgdev->vdev->dev.parent,
			     obj->pages->sgl, obj->mapped,
			     DMA_TO_DEVICE);
		obj->mapped = 0;
	} else {
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
	}

	sg_free_table(obj->pages);
	obj->pages = NULL;

	drm_gem_shmem_unpin(&obj->base.base);
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}