/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include <drm/drm_edid.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)
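
/*
 * Layout of one slab-allocated vbuffer (see virtio_gpu_get_vbuf()):
 *
 *	[ struct virtio_gpu_vbuffer | inline cmd | inline resp ]
 *	                               <= 96 bytes  <= 24 bytes
 *
 * Commands and responses that fit within the inline limits need no
 * extra allocations; larger response buffers are supplied by the
 * caller and freed by free_vbuf().
 */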

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

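/*
 * Virtqueue callbacks.  These run from the virtio interrupt path, so
 * they only schedule the per-queue dequeue work; the actual buffer
 * reclaim happens in process context in the dequeue functions below.
 */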
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

static struct virtio_gpu_vbuffer *
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor *
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}
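
/*
 * Typical command submission using the helpers above (illustrative
 * sketch; the virtio_gpu_cmd_*() functions below all follow this
 * pattern):
 *
 *	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 *	memset(cmd_p, 0, sizeof(*cmd_p));
 *	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_...);
 *	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 */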

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed\n");
}

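/*
 * Process completed control commands: log unexpected responses, signal
 * fences and run per-command response callbacks in a first pass, then
 * free the vbuffers in a second pass.
 */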
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
				struct virtio_gpu_ctrl_hdr *cmd;

				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else {
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
			}
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			fence_id = le64_to_cpu(resp->fence_id);
			virtio_gpu_fence_event_process(vgdev, fence_id);
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sgtable_sg(sgt, sg, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}

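/*
 * Add one command's scatterlists to the control virtqueue.  If the
 * ring is too full, drop the lock, notify the host and sleep until
 * enough descriptors have been reclaimed, then try again.
 */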
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf,
				     struct virtio_gpu_fence *fence,
				     int elemcnt,
				     struct scatterlist **sgs,
				     int outcnt,
				     int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret, idx;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		free_vbuf(vgdev, vbuf);
		return -ENODEV;
	}

	if (vgdev->has_indirect)
		elemcnt = 1;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		virtio_gpu_notify(vgdev);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}

	/* now that the position of the vbuf in the virtqueue is known, we can
	 * finally set the fence id
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

	atomic_inc(&vgdev->pending_commands);

	spin_unlock(&vgdev->ctrlq.qlock);

	drm_dev_exit(idx);
	return 0;
}

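/*
 * Build the scatterlist array for one command: the command itself is
 * always the first out element, an optional data buffer follows it,
 * and the response buffer (if any) is the single in element.
 */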
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0, ret;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			int sg_ents;

			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return -ENOMEM;
			}

			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
					incnt);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
	return ret;
}

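/*
 * Kick the host.  Queuing a command only marks it pending; callers can
 * batch several commands on the control queue and then notify once, so
 * the host need not be poked for every single command.
 */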
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
	bool notify;

	if (!atomic_read(&vgdev->pending_commands))
		return;

	spin_lock(&vgdev->ctrlq.qlock);
	atomic_set(&vgdev->pending_commands, 0);
	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
	spin_unlock(&vgdev->ctrlq.qlock);

	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int idx, ret, outcnt;
	bool notify;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		free_vbuf(vgdev, vbuf);
		return;
	}

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
					   virtio_gpu_vbuf_ctrl_hdr(vbuf));

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);

	drm_dev_exit(idx);
}

/* Just create gem objects for userspace and long-lived objects;
 * should the queue objects just use dma_alloc'ed pages?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;

	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;

	virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int ret;

	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->resp_cb_data = bo;
	ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	if (ret < 0)
		virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height,
				   struct virtio_gpu_object_array *objs,
				   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    shmem->pages, DMA_TO_DEVICE);
	}

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

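/*
 * Attach guest pages to a host resource.  The ents array becomes
 * vbuf->data_buf and is kvfree()d by free_vbuf() once the command has
 * completed, so the caller must not free it.
 */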
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d\n", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled\n", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	if (vgdev->capsets) {
		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	} else {
		DRM_ERROR("invalid capset memory\n");
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

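/*
 * EDID block reader for drm_do_get_edid(): copies one 128-byte EDID
 * block out of the response buffer that the host filled in.
 */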
static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t context_init, uint32_t nlen,
				   const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	cmd_p->context_init = cpu_to_le32(context_init);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					uint32_t stride,
					uint32_t layer_stride,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    shmem->pages, DMA_TO_DEVICE);
	}

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  uint32_t stride,
					  uint32_t layer_stride,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_mem_entry *ents,
			      unsigned int nents)
{
	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
					    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *obj =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_resource_uuid *resp =
		(struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->resource_export_lock);
	WARN_ON(obj->uuid_state != STATE_INITIALIZING);

	if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
	    obj->uuid_state == STATE_INITIALIZING) {
		import_uuid(&obj->uuid, resp->uuid);
		obj->uuid_state = STATE_OK;
	} else {
		obj->uuid_state = STATE_ERR;
	}
	spin_unlock(&vgdev->resource_export_lock);

	wake_up_all(&vgdev->resp_wq);
}

int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_resource_assign_uuid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_resource_uuid *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf) {
		spin_lock(&vgdev->resource_export_lock);
		bo->uuid_state = STATE_ERR;
		spin_unlock(&vgdev->resource_export_lock);
		virtio_gpu_array_put_free(objs);
		return -ENOMEM;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->objs = objs;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
					   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_map_info *resp =
		(struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->host_visible_lock);

	if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
		vram->map_info = resp->map_info;
		vram->map_state = STATE_OK;
	} else {
		vram->map_state = STATE_ERR;
	}

	spin_unlock(&vgdev->host_visible_lock);
	wake_up_all(&vgdev->resp_wq);
}

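/*
 * Ask the host to map a blob resource into the host-visible region.
 * The answer arrives asynchronously via virtio_gpu_cmd_resource_map_cb()
 * above, which records map_info/map_state and wakes resp_wq waiters.
 */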
int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_object_array *objs, uint64_t offset)
{
	struct virtio_gpu_resource_map_blob *cmd_p;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_map_info *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_map_info), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	vbuf->objs = objs;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
			  struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unmap_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_mem_entry *ents,
				    uint32_t nents)
{
	struct virtio_gpu_resource_create_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
	cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
	cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
	cmd_p->blob_id = cpu_to_le64(params->blob_id);
	cmd_p->size = cpu_to_le64(params->size);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	bo->created = true;
}

void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
				     uint32_t scanout_id,
				     struct virtio_gpu_object *bo,
				     struct drm_framebuffer *fb,
				     uint32_t width, uint32_t height,
				     uint32_t x, uint32_t y)
{
	uint32_t i;
	struct virtio_gpu_set_scanout_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	uint32_t format = virtio_gpu_translate_format(fb->format->format);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);

	cmd_p->format = cpu_to_le32(format);
	cmd_p->width  = cpu_to_le32(fb->width);
	cmd_p->height = cpu_to_le32(fb->height);

	for (i = 0; i < 4; i++) {
		cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
		cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
	}

	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}