/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

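/* Virtqueue "used buffer" callbacks.  These run in virtio interrupt
 * context, so they only kick the per-queue dequeue work instead of
 * reclaiming buffers directly.
 */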
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

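/* Allocate a vbuffer from the slab cache.  The command (and, if it fits
 * in MAX_INLINE_RESP_SIZE bytes, the response) is stored inline right
 * behind struct virtio_gpu_vbuffer; larger responses must come with a
 * caller-provided resp_buf.
 */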
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

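/* Move all completed buffers from the virtqueue onto @reclaim_list.
 * Called with the corresponding queue lock held.
 */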
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

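/* Work function for the control queue: reclaim completed vbuffers, log
 * error responses, track the highest fence id seen, invoke per-command
 * response callbacks and finally free the buffers.
 */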
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sg(sgt->sgl, sg, *sg_ents, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}

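/* Add the prepared scatterlists to the control virtqueue.  If the queue
 * is too full, kick the host and sleep until enough descriptors have
 * been reclaimed.  The fence (if any) is emitted while holding the
 * queue lock so fence ids reach the ring in submission order.
 */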
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf,
				     struct virtio_gpu_fence *fence,
				     int elemcnt,
				     struct scatterlist **sgs,
				     int outcnt,
				     int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret, idx;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		free_vbuf(vgdev, vbuf);
		return -1;
	}

	if (vgdev->has_indirect)
		elemcnt = 1;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		virtio_gpu_notify(vgdev);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}

	/* now that the position of the vbuf in the virtqueue is known, we can
	 * finally set the fence id
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

	atomic_inc(&vgdev->pending_commands);

	spin_unlock(&vgdev->ctrlq.qlock);

	drm_dev_exit(idx);
	return 0;
}

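/* Build the scatterlist array for a control command: the command itself,
 * an optional data buffer (turned into a page-by-page sg table when it
 * is vmalloc'ed) and an optional response buffer.
 */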
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0, ret;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			int sg_ents;
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return -1;
			}

			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
					incnt);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
	return ret;
}

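/* Kick the host for all commands queued since the last notify.  Queuing
 * and notification are split so that several commands can be batched
 * behind a single notification.
 */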
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
	bool notify;

	if (!atomic_read(&vgdev->pending_commands))
		return;

	spin_lock(&vgdev->ctrlq.qlock);
	atomic_set(&vgdev->pending_commands, 0);
	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
	spin_unlock(&vgdev->ctrlq.qlock);

	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

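/* Cursor commands carry no fence and add no response buffer to the
 * ring.  If the cursor queue is full, wait for the host to consume
 * entries and retry; the notify is issued immediately rather than
 * batched.
 */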
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int idx, ret, outcnt;
	bool notify;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		free_vbuf(vgdev, vbuf);
		return;
	}

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
			virtio_gpu_vbuf_ctrl_hdr(vbuf));

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);

	drm_dev_exit(idx);
}

/* Create gem objects only for userspace and other long-lived objects;
 * could the short-lived queue objects simply use dma_alloc'ed pages?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;

	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;

	virtio_gpu_cleanup_object(bo);
}

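/* Ask the host to drop a resource.  The backing object is freed from the
 * response callback above, i.e. only after the host has confirmed the
 * unref, or immediately if queuing the command failed.
 */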
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int ret;

	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->resp_cb_data = bo;
	ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	if (ret < 0)
		virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       shmem->pages->sgl, shmem->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	if (vgdev->capsets) {
		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	} else {
		DRM_ERROR("invalid capset memory.");
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

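/* Block read callback for drm_do_get_edid(): copy one EDID block out of
 * the response buffer returned by the host.
 */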
static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

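/* Fetch a capability set from the host.  Results are cached in
 * vgdev->cap_cache; if another task already requested the same capset
 * and version, the existing (possibly still pending) cache entry is
 * returned instead of issuing a new command.
 */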
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

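/* Request the EDID for every scanout.  Responses are handled
 * asynchronously by virtio_gpu_cmd_get_edid_cb().
 */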
int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       shmem->pages->sgl, shmem->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_mem_entry *ents,
			      unsigned int nents)
{
	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
					    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *obj =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_resource_uuid *resp =
		(struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->resource_export_lock);
	WARN_ON(obj->uuid_state != STATE_INITIALIZING);

	if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
	    obj->uuid_state == STATE_INITIALIZING) {
		memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b));
		obj->uuid_state = STATE_OK;
	} else {
		obj->uuid_state = STATE_ERR;
	}
	spin_unlock(&vgdev->resource_export_lock);

	wake_up_all(&vgdev->resp_wq);
}

int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_resource_assign_uuid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_resource_uuid *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf) {
		spin_lock(&vgdev->resource_export_lock);
		bo->uuid_state = STATE_ERR;
		spin_unlock(&vgdev->resource_export_lock);
		virtio_gpu_array_put_free(objs);
		return -ENOMEM;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->objs = objs;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}