/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include <drm/drm_edid.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)
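
/*
 * Each vbuffer is carved out of the "virtio-gpu-vbufs" slab as a single
 * allocation: the struct virtio_gpu_vbuffer bookkeeping data, followed by
 * up to MAX_INLINE_CMD_SIZE bytes of command data and MAX_INLINE_RESP_SIZE
 * bytes of inline response space.  Responses larger than that use a
 * separate, caller-provided buffer (see virtio_gpu_get_vbuf()).
 */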

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}
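
/*
 * The dequeue workers below run from the workqueue, scheduled by the
 * virtqueue callbacks above.  They pull every completed vbuffer off the
 * ring under the queue lock, then (for the control queue) report error
 * responses, signal fences via virtio_gpu_fence_event_process() and run
 * per-command response callbacks before the buffers are freed.
 */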

void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			fence_id = le64_to_cpu(resp->fence_id);
			virtio_gpu_fence_event_process(vgdev, fence_id);
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sgtable_sg(sgt, sg, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}
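
/*
 * vmalloc'd payloads (a vmalloc'd vbuf->data_buf, for example) are not
 * physically contiguous, so they cannot be described with sg_init_one().
 * Instead vmalloc_to_sgt() above walks the buffer page by page and builds
 * a scatter-gather table with one entry per page.
 */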

static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf,
				     struct virtio_gpu_fence *fence,
				     int elemcnt,
				     struct scatterlist **sgs,
				     int outcnt,
				     int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret, idx;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		free_vbuf(vgdev, vbuf);
		return -ENODEV;
	}

	if (vgdev->has_indirect)
		elemcnt = 1;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		virtio_gpu_notify(vgdev);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}

	/* now that the position of the vbuf in the virtqueue is known, we can
	 * finally set the fence id
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

	atomic_inc(&vgdev->pending_commands);

	spin_unlock(&vgdev->ctrlq.qlock);

	drm_dev_exit(idx);
	return 0;
}
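
/*
 * virtio_gpu_queue_ctrl_sgs() only adds the buffer to the ring: if there is
 * not enough space it kicks the host (virtio_gpu_notify()) and sleeps on
 * ctrlq.ack_queue until descriptors are reclaimed.  With indirect
 * descriptors a single ring slot suffices regardless of elemcnt.  Fences
 * are emitted under ctrlq.qlock, so fence ids match submission order; the
 * host notification itself is deferred to virtio_gpu_notify().
 */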

static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0, ret;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			int sg_ents;
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return -ENOMEM;
			}

			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
					incnt);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
	return ret;
}
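
/*
 * Descriptor layout for a control command, in sgs[] order: the command
 * itself (device-readable), an optional data payload such as attach-backing
 * entries or an execbuffer stream (device-readable), and an optional
 * response buffer (device-writable).  At most three scatterlist entries are
 * passed, so sgs[3] is always large enough; elemcnt additionally counts the
 * individual pages of a vmalloc'd payload for the ring-space check.
 */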

void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
	bool notify;

	if (!atomic_read(&vgdev->pending_commands))
		return;

	spin_lock(&vgdev->ctrlq.qlock);
	atomic_set(&vgdev->pending_commands, 0);
	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
	spin_unlock(&vgdev->ctrlq.qlock);

	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);
}
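
/*
 * Queuing and notification are decoupled so that several commands can be
 * queued back to back and the host kicked only once: the queue helpers bump
 * pending_commands, virtio_gpu_notify() clears the counter and issues a
 * single kick.  A caller typically looks like the following (sketch only,
 * mirroring the helpers used elsewhere in this file):
 *
 *	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 *	memset(cmd_p, 0, sizeof(*cmd_p));
 *	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_...);
 *	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 *	virtio_gpu_notify(vgdev);	/~ may be issued once for a whole batch ~/
 */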

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int idx, ret, outcnt;
	bool notify;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		free_vbuf(vgdev, vbuf);
		return;
	}

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
			virtio_gpu_vbuf_ctrl_hdr(vbuf));

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);

	drm_dev_exit(idx);
}
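
/*
 * Cursor updates take the separate cursor virtqueue: they carry no data
 * payload, expect no response and are never fenced, so the host is kicked
 * immediately instead of going through the pending_commands batching used
 * for the control queue.
 */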

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;

	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;

	virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int ret;

	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->resp_cb_data = bo;
	ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	if (ret < 0)
		virtio_gpu_cleanup_object(bo);
}
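
/*
 * For RESOURCE_UNREF the object must stay around until the host has
 * processed the command, so the cleanup is deferred to the response
 * callback above; only if queuing fails is the object torn down directly.
 */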

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height,
				   struct virtio_gpu_object_array *objs,
				   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api)
		dma_sync_sgtable_for_device(&vgdev->vdev->dev,
					    bo->base.sgt, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	if (vgdev->capsets) {
		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	} else {
		DRM_ERROR("invalid capset memory.");
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}
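
/*
 * smp_wmb() above orders the caps_cache copy against the is_valid store;
 * the waiter polling is_valid on resp_wq is expected to pair this with a
 * read barrier (smp_rmb()) before it copies the cached data out.
 */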

static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -EINVAL;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}
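
/*
 * The EDID blob returned by the host is read block by block through
 * virtio_get_edid_block() and attached to the connector property; only the
 * output->edid pointer swap happens under display_info_lock, so the
 * previous EDID can be freed after the lock is dropped.
 */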

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}
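
/*
 * virtio_gpu_cmd_get_capset() allocates the cache entry and response buffer
 * up front, then inserts the entry into cap_cache under display_info_lock.
 * If another task raced and already cached the same id/version, the fresh
 * allocations are simply discarded and the existing (possibly still
 * pending) entry is returned; callers are expected to wait on resp_wq until
 * is_valid is set before using the data.
 */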

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t context_init, uint32_t nlen,
				   const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	cmd_p->context_init = cpu_to_le32(context_init);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					uint32_t stride,
					uint32_t layer_stride,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api)
		dma_sync_sgtable_for_device(&vgdev->vdev->dev,
					    bo->base.sgt, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  uint32_t stride,
					  uint32_t layer_stride,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
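
/*
 * The submitted command stream is handed over in vbuf->data_buf, so its
 * ownership passes to the vbuffer: it is sent as the out payload and
 * released with kvfree() in free_vbuf() once the host has consumed it
 * (vmalloc'd streams go through vmalloc_to_sgt() on the way out).
 */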

void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_mem_entry *ents,
			      unsigned int nents)
{
	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
					    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *obj =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_resource_uuid *resp =
		(struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->resource_export_lock);
	WARN_ON(obj->uuid_state != STATE_INITIALIZING);

	if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
	    obj->uuid_state == STATE_INITIALIZING) {
		import_uuid(&obj->uuid, resp->uuid);
		obj->uuid_state = STATE_OK;
	} else {
		obj->uuid_state = STATE_ERR;
	}
	spin_unlock(&vgdev->resource_export_lock);

	wake_up_all(&vgdev->resp_wq);
}

int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_resource_assign_uuid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_resource_uuid *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf) {
		spin_lock(&vgdev->resource_export_lock);
		bo->uuid_state = STATE_ERR;
		spin_unlock(&vgdev->resource_export_lock);
		virtio_gpu_array_put_free(objs);
		return -ENOMEM;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->objs = objs;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
					   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_map_info *resp =
		(struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->host_visible_lock);

	if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
		vram->map_info = resp->map_info;
		vram->map_state = STATE_OK;
	} else {
		vram->map_state = STATE_ERR;
	}

	spin_unlock(&vgdev->host_visible_lock);
	wake_up_all(&vgdev->resp_wq);
}

int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_object_array *objs, uint64_t offset)
{
	struct virtio_gpu_resource_map_blob *cmd_p;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_map_info *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_map_info), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	vbuf->objs = objs;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
			  struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unmap_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_mem_entry *ents,
				    uint32_t nents)
{
	struct virtio_gpu_resource_create_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
	cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
	cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
	cmd_p->blob_id = cpu_to_le64(params->blob_id);
	cmd_p->size = cpu_to_le64(params->size);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	bo->created = true;
}
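
/*
 * As with attach-backing, the ents array becomes the vbuffer's data
 * payload, so it is owned by the vbuffer from here on and freed together
 * with it; blob creation itself is queued unfenced.
 */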

void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
				     uint32_t scanout_id,
				     struct virtio_gpu_object *bo,
				     struct drm_framebuffer *fb,
				     uint32_t width, uint32_t height,
				     uint32_t x, uint32_t y)
{
	uint32_t i;
	struct virtio_gpu_set_scanout_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	uint32_t format = virtio_gpu_translate_format(fb->format->format);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);

	cmd_p->format = cpu_to_le32(format);
	cmd_p->width  = cpu_to_le32(fb->width);
	cmd_p->height = cpu_to_le32(fb->height);

	for (i = 0; i < 4; i++) {
		cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
		cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
	}

	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}