/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include <drm/drm_edid.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

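/*
 * A vbuffer is a single VBUFFER_SIZE slab allocation: the
 * virtio_gpu_vbuffer struct itself, followed by up to
 * MAX_INLINE_CMD_SIZE bytes of command storage and, for small
 * responses, up to MAX_INLINE_RESP_SIZE bytes of response storage.
 * Responses larger than that must be passed in via resp_buf.
 */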
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

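/*
 * Typical command submission with the allocators above (a sketch of
 * the pattern used throughout this file, not an additional API):
 *
 *	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 *	memset(cmd_p, 0, sizeof(*cmd_p));
 *	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_...);
 *	... fill in command-specific fields ...
 *	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 */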
static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

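/*
 * Dequeue work for the control queue, scheduled from virtio_gpu_ctrl_ack().
 * The disable_cb/enable_cb loop re-checks the queue until
 * virtqueue_enable_cb() reports that no buffers were added behind our
 * back, so a callback racing with the final virtqueue_get_buf() is not
 * lost.
 */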
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp, entry->seqno);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			fence_id = le64_to_cpu(resp->fence_id);
			virtio_gpu_fence_event_process(vgdev, fence_id);
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		struct virtio_gpu_ctrl_hdr *resp =
			(struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->cursorq.vq, resp, entry->seqno);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sgtable_sg(sgt, sg, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}

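/*
 * Add one command to the control virtqueue.  elemcnt is the number of
 * descriptor slots the command needs; when the device offers
 * VIRTIO_RING_F_INDIRECT_DESC the whole scatterlist fits in a single
 * indirect descriptor, so only one free slot is required.
 */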
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf,
				     struct virtio_gpu_fence *fence,
				     int elemcnt,
				     struct scatterlist **sgs,
				     int outcnt,
				     int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret, idx;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		free_vbuf(vgdev, vbuf);
		return -ENODEV;
	}

	if (vgdev->has_indirect)
		elemcnt = 1;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		virtio_gpu_notify(vgdev);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}

	/* now that the position of the vbuf in the virtqueue is known, we can
	 * finally set the fence id
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	vbuf->seqno = ++vgdev->ctrlq.seqno;
	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);

	atomic_inc(&vgdev->pending_commands);

	spin_unlock(&vgdev->ctrlq.qlock);

	drm_dev_exit(idx);
	return 0;
}

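/*
 * Build the scatterlists for one command: sgs[0] is the command itself,
 * sgs[1] an optional device-readable data payload, and the final entry
 * an optional device-writable response buffer.
 */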
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0, ret;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			int sg_ents;
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return -ENOMEM;
			}

			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
					incnt);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
	return ret;
}

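/*
 * Ring the doorbell for the control queue.  Commands are only added to
 * the virtqueue by virtio_gpu_queue_ctrl_sgs(); notification is done
 * separately here so callers can batch several commands per kick.
 * pending_commands tracks whether there is anything to flush.
 */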
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
	bool notify;

	if (!atomic_read(&vgdev->pending_commands))
		return;

	spin_lock(&vgdev->ctrlq.qlock);
	atomic_set(&vgdev->pending_commands, 0);
	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
	spin_unlock(&vgdev->ctrlq.qlock);

	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

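/*
 * The cursor queue, unlike the control queue, is not batched: each
 * update is kicked to the host immediately (cursor movement is assumed
 * to be latency sensitive).
 */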
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int idx, ret, outcnt;
	bool notify;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		free_vbuf(vgdev, vbuf);
		return;
	}

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		vbuf->seqno = ++vgdev->cursorq.seqno;
		trace_virtio_gpu_cmd_queue(vq,
			virtio_gpu_vbuf_ctrl_hdr(vbuf),
			vbuf->seqno);

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);

	drm_dev_exit(idx);
}

/* Just create gem objects for userspace and long-lived objects;
 * could the queue objects simply use dma_alloc'ed pages?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

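/*
 * Resource unref is completed through virtio_gpu_cmd_unref_cb() so the
 * object is only cleaned up once the host has acknowledged the unref
 * and can no longer touch its backing pages.
 */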
static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;

	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;

	virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int ret;

	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->resp_cb_data = bo;
	ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	if (ret < 0)
		virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height,
				   struct virtio_gpu_object_array *objs,
				   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

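/*
 * For shmem objects the transfer reads guest memory, so when the
 * transport uses the DMA API the backing pages are synced for device
 * access before the command is queued.
 */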
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    bo->base.sgt, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

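/* Response handler for VIRTIO_GPU_CMD_GET_DISPLAY_INFO: cache the
 * per-scanout modes and raise a hotplug event.
 */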
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	if (vgdev->capsets) {
		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	} else {
		DRM_ERROR("invalid capset memory.");
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

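/* Fetch a single EDID block out of the response buffer; used as the
 * get_edid callback for drm_do_get_edid() below.
 */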
static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -EINVAL;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t context_init, uint32_t nlen,
				   const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	cmd_p->context_init = cpu_to_le32(context_init);
	strscpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name));
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

	bo->created = true;
}

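/*
 * Queue VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D: copy the 3D region @box of
 * guest-backed storage into the host resource.  For shmem objects on
 * transports that use the DMA API, the backing sgtable is synced for
 * device access first so the host reads up-to-date data.
 */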
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					uint32_t stride,
					uint32_t layer_stride,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    bo->base.sgt, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

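/*
 * Queue VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D: the read-back
 * counterpart of the transfer above, copying @box from the host
 * resource back into guest memory.
 */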
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  uint32_t stride,
					  uint32_t layer_stride,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

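/*
 * Queue VIRTIO_GPU_CMD_SUBMIT_3D: hand a command stream to the host
 * renderer.  Ownership of @data moves to the vbuffer, which frees it
 * once the host has consumed the command.  An illustrative call from
 * an execbuffer path (variable names are hypothetical) might be:
 *
 *	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
 *			      vfpriv->ctx_id, buflist, out_fence);
 */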
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

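/*
 * Convenience wrapper: attach @ents as backing storage for @obj
 * without requesting a fence.
 */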
void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_mem_entry *ents,
			      unsigned int nents)
{
	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);
}

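/*
 * Push the cached cursor state for @output to the cursor queue.  The
 * scanout id is refreshed from @output->index before the state is
 * copied into the vbuffer.
 */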
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}

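/*
 * Completion callback for RESOURCE_ASSIGN_UUID below: runs from the
 * ctrl queue response path, records the host-assigned UUID (or an
 * error state) under resource_export_lock, and wakes anyone sleeping
 * on resp_wq for the result.
 */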
static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
					    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *obj =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_resource_uuid *resp =
		(struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->resource_export_lock);
	WARN_ON(obj->uuid_state != STATE_INITIALIZING);

	if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
	    obj->uuid_state == STATE_INITIALIZING) {
		import_uuid(&obj->uuid, resp->uuid);
		obj->uuid_state = STATE_OK;
	} else {
		obj->uuid_state = STATE_ERR;
	}
	spin_unlock(&vgdev->resource_export_lock);

	wake_up_all(&vgdev->resp_wq);
}

int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_resource_assign_uuid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_resource_uuid *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf) {
		spin_lock(&vgdev->resource_export_lock);
		bo->uuid_state = STATE_ERR;
		spin_unlock(&vgdev->resource_export_lock);
		virtio_gpu_array_put_free(objs);
		return -ENOMEM;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->objs = objs;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

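/*
 * Completion callback for the MAP_BLOB command issued by
 * virtio_gpu_cmd_map() below: stores the host's map_info for the VRAM
 * object and flips map_state so waiters on resp_wq can proceed.
 */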
static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
					   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_map_info *resp =
		(struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->host_visible_lock);

	if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
		vram->map_info = resp->map_info;
		vram->map_state = STATE_OK;
	} else {
		vram->map_state = STATE_ERR;
	}

	spin_unlock(&vgdev->host_visible_lock);
	wake_up_all(&vgdev->resp_wq);
}

int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_object_array *objs, uint64_t offset)
{
	struct virtio_gpu_resource_map_blob *cmd_p;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_map_info *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_map_info), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	vbuf->objs = objs;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

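/*
 * Queue VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB: ask the host to tear down
 * the mapping previously established via virtio_gpu_cmd_map().
 */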
void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
			  struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unmap_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

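/*
 * Queue VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB: create a blob resource,
 * passing the @ents scatter/gather entries as the command's data
 * payload.  No fence is taken here; as with create_3d above,
 * @bo->created is set as soon as the command is queued.
 */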
void
virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_mem_entry *ents,
				    uint32_t nents)
{
	struct virtio_gpu_resource_create_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
	cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
	cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
	cmd_p->blob_id = cpu_to_le64(params->blob_id);
	cmd_p->size = cpu_to_le64(params->size);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	bo->created = true;
}

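/*
 * Queue VIRTIO_GPU_CMD_SET_SCANOUT_BLOB: point scanout @scanout_id at
 * the blob resource backing @fb, including the per-plane strides and
 * offsets taken from the framebuffer and the visible rectangle given
 * by @x, @y, @width and @height.
 */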
void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
				     uint32_t scanout_id,
				     struct virtio_gpu_object *bo,
				     struct drm_framebuffer *fb,
				     uint32_t width, uint32_t height,
				     uint32_t x, uint32_t y)
{
	uint32_t i;
	struct virtio_gpu_set_scanout_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	uint32_t format = virtio_gpu_translate_format(fb->format->format);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);

	cmd_p->format = cpu_to_le32(format);
	cmd_p->width  = cpu_to_le32(fb->width);
	cmd_p->height = cpu_to_le32(fb->height);

	for (i = 0; i < 4; i++) {
		cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
		cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
	}

	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}