/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

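/*
 * Commands small enough to fit MAX_INLINE_CMD_SIZE are stored inline in the
 * vbuffer allocation (see virtio_gpu_get_vbuf()); responses up to
 * MAX_INLINE_RESP_SIZE reuse the inline space as well, larger response
 * buffers must be supplied by the caller.
 */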
#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

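/*
 * Completion callbacks for the control and cursor virtqueues: they only
 * kick the corresponding dequeue work, which does the actual reclaiming.
 */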
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

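/*
 * Allocate a vbuffer from the slab cache.  The command is always stored
 * inline right behind the struct; small responses reuse the inline space
 * behind the command, larger ones use the caller-supplied resp_buf.
 */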
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

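/* Pull all completed buffers off a virtqueue onto the reclaim list. */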
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

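/*
 * Dequeue work for the control queue: drain completed buffers (re-checking
 * after re-enabling the callback to avoid losing a notification), log error
 * responses, signal fences, run per-buffer response callbacks, then free the
 * vbuffers and drop their object references.
 */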
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			fence_id = le64_to_cpu(resp->fence_id);
			virtio_gpu_fence_event_process(vgdev, fence_id);
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sgtable_sg(sgt, sg, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}

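/*
 * Add a prepared scatterlist array to the control queue.  Waits (outside the
 * queue lock, after kicking the host) until enough descriptors are free,
 * emits the fence while holding the lock so fence ids stay ordered with ring
 * submission, and bumps pending_commands so a later virtio_gpu_notify()
 * kicks the host.
 */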
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf,
				     struct virtio_gpu_fence *fence,
				     int elemcnt,
				     struct scatterlist **sgs,
				     int outcnt,
				     int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret, idx;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		free_vbuf(vgdev, vbuf);
		return -1;
	}

	if (vgdev->has_indirect)
		elemcnt = 1;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		virtio_gpu_notify(vgdev);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}

	/* now that the position of the vbuf in the virtqueue is known, we can
	 * finally set the fence id
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

	atomic_inc(&vgdev->pending_commands);

	spin_unlock(&vgdev->ctrlq.qlock);

	drm_dev_exit(idx);
	return 0;
}

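/*
 * Build the scatterlist array for a control command: one out entry for the
 * command itself, an optional out entry (or sg table for vmalloc'ed data)
 * for attached data, and an optional in entry for the response.
 */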
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0, ret;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			int sg_ents;
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return -1;
			}

			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
					incnt);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
	return ret;
}

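/*
 * Kick the control queue if any commands were queued since the last notify.
 * Queueing and notification are split so that several queued commands can
 * share a single kick.
 */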
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
	bool notify;

	if (!atomic_read(&vgdev->pending_commands))
		return;

	spin_lock(&vgdev->ctrlq.qlock);
	atomic_set(&vgdev->pending_commands, 0);
	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
	spin_unlock(&vgdev->ctrlq.qlock);

	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

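/*
 * Cursor commands use their own queue and are kicked immediately rather
 * than batched; if the ring is full we simply wait for space and retry.
 */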
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int idx, ret, outcnt;
	bool notify;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		free_vbuf(vgdev, vbuf);
		return;
	}

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
			virtio_gpu_vbuf_ctrl_hdr(vbuf));

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);

	drm_dev_exit(idx);
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;

	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;

	virtio_gpu_cleanup_object(bo);
}

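/*
 * Unref a resource on the host.  The object itself is only cleaned up from
 * the response callback, i.e. after the host has processed the command; if
 * queueing fails the object is cleaned up directly.
 */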
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int ret;

	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->resp_cb_data = bo;
	ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	if (ret < 0)
		virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height,
				   struct virtio_gpu_object_array *objs,
				   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

	if (use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    shmem->pages, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

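/*
 * Attach guest pages to a resource.  Ownership of the mem entry array passes
 * to the vbuffer (data_buf) and it is kvfree'd when the command completes.
 */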
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	if (vgdev->capsets) {
		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	} else {
		DRM_ERROR("invalid capset memory.");
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

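/*
 * Fetch a capset from the host.  A cache entry is allocated up front and,
 * under display_info_lock, either inserted into the cap_cache list or
 * discarded if another task already added a matching entry; the response
 * callback copies the data and marks the entry valid.
 */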
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t context_init, uint32_t nlen,
				   const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	cmd_p->context_init = cpu_to_le32(context_init);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					uint32_t stride,
					uint32_t layer_stride,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    shmem->pages, DMA_TO_DEVICE);
	}

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  uint32_t stride,
					  uint32_t layer_stride,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_mem_entry *ents,
			      unsigned int nents)
{
	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}

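/*
 * UUID export: the response callback records the host-assigned uuid (or an
 * error state) under resource_export_lock and wakes up waiters.
 */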
static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
					    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *obj =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_resource_uuid *resp =
		(struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->resource_export_lock);
	WARN_ON(obj->uuid_state != STATE_INITIALIZING);

	if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
	    obj->uuid_state == STATE_INITIALIZING) {
		import_uuid(&obj->uuid, resp->uuid);
		obj->uuid_state = STATE_OK;
	} else {
		obj->uuid_state = STATE_ERR;
	}
	spin_unlock(&vgdev->resource_export_lock);

	wake_up_all(&vgdev->resp_wq);
}

int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_resource_assign_uuid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_resource_uuid *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf) {
		spin_lock(&vgdev->resource_export_lock);
		bo->uuid_state = STATE_ERR;
		spin_unlock(&vgdev->resource_export_lock);
		virtio_gpu_array_put_free(objs);
		return -ENOMEM;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->objs = objs;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
					   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_map_info *resp =
		(struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->host_visible_lock);

	if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
		vram->map_info = resp->map_info;
		vram->map_state = STATE_OK;
	} else {
		vram->map_state = STATE_ERR;
	}

	spin_unlock(&vgdev->host_visible_lock);
	wake_up_all(&vgdev->resp_wq);
}

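/*
 * Map a blob resource at the given offset in the host-visible region; the
 * response callback stores the returned map_info and flips the vram
 * object's map_state.
 */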
int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_object_array *objs, uint64_t offset)
{
	struct virtio_gpu_resource_map_blob *cmd_p;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_map_info *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_map_info), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	vbuf->objs = objs;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
			  struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unmap_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

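/*
 * Create a blob resource.  The mem entry array is handed over via
 * vbuf->data_buf and freed together with the vbuffer once the command has
 * completed.
 */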
void
virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_mem_entry *ents,
				    uint32_t nents)
{
	struct virtio_gpu_resource_create_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
	cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
	cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
	cmd_p->blob_id = cpu_to_le64(params->blob_id);
	cmd_p->size = cpu_to_le64(params->size);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	bo->created = true;
}

void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
				     uint32_t scanout_id,
				     struct virtio_gpu_object *bo,
				     struct drm_framebuffer *fb,
				     uint32_t width, uint32_t height,
				     uint32_t x, uint32_t y)
{
	uint32_t i;
	struct virtio_gpu_set_scanout_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	uint32_t format = virtio_gpu_translate_format(fb->format->format);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);

	cmd_p->format = cpu_to_le32(format);
	cmd_p->width  = cpu_to_le32(fb->width);
	cmd_p->height = cpu_to_le32(fb->height);

	for (i = 0; i < 4; i++) {
		cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
		cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
	}

	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}