/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

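/* Commands must fit into MAX_INLINE_CMD_SIZE and are stored inline in the
 * vbuffer.  Responses up to MAX_INLINE_RESP_SIZE are stored inline as well;
 * larger responses use a caller-provided buffer.
 */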
#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

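/* Allocate and release host resource ids using the resource IDR. */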
void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				uint32_t *resid)
{
	int handle;

	idr_preload(GFP_KERNEL);
	spin_lock(&vgdev->resource_idr_lock);
	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&vgdev->resource_idr_lock);
	idr_preload_end();
	*resid = handle;
}

void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	spin_lock(&vgdev->resource_idr_lock);
	idr_remove(&vgdev->resource_idr, id);
	spin_unlock(&vgdev->resource_idr_lock);
}

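/* Virtqueue callbacks: just kick the dequeue work items, the buffers are
 * processed from the workqueue in process context.
 */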
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

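/* Preallocate one vbuffer per virtqueue entry (plus a few spares) in a
 * single allocation and put them all on the free list.
 */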
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_vbuffer *vbuf;
	int i, size, count = 16;
	void *ptr;

	INIT_LIST_HEAD(&vgdev->free_vbufs);
	spin_lock_init(&vgdev->free_vbufs_lock);
	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
	count += virtqueue_get_vring_size(vgdev->cursorq.vq);
	size = count * VBUFFER_SIZE;
	DRM_INFO("virtio vbuffers: %d bufs, %zdB each, %dkB total.\n",
		 count, VBUFFER_SIZE, size / 1024);

	vgdev->vbufs = kzalloc(size, GFP_KERNEL);
	if (!vgdev->vbufs)
		return -ENOMEM;

	for (i = 0, ptr = vgdev->vbufs;
	     i < count;
	     i++, ptr += VBUFFER_SIZE) {
		vbuf = ptr;
		list_add(&vbuf->list, &vgdev->free_vbufs);
	}
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_vbuffer *vbuf;
	int i, count = 0;

	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
	count += virtqueue_get_vring_size(vgdev->cursorq.vq);

	spin_lock(&vgdev->free_vbufs_lock);
	for (i = 0; i < count; i++) {
		if (WARN_ON(list_empty(&vgdev->free_vbufs))) {
			spin_unlock(&vgdev->free_vbufs_lock);
			return;
		}
		vbuf = list_first_entry(&vgdev->free_vbufs,
					struct virtio_gpu_vbuffer, list);
		list_del(&vbuf->list);
	}
	spin_unlock(&vgdev->free_vbufs_lock);
	kfree(vgdev->vbufs);
}

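/* Take a preallocated vbuffer off the free list and set up its inline
 * command and response buffers.
 */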
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	spin_lock(&vgdev->free_vbufs_lock);
	BUG_ON(list_empty(&vgdev->free_vbufs));
	vbuf = list_first_entry(&vgdev->free_vbufs,
				struct virtio_gpu_vbuffer, list);
	list_del(&vbuf->list);
	spin_unlock(&vgdev->free_vbufs_lock);
	memset(vbuf, 0, VBUFFER_SIZE);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kfree(vbuf->data_buf);
	spin_lock(&vgdev->free_vbufs_lock);
	list_add(&vbuf->list, &vgdev->free_vbufs);
	spin_unlock(&vgdev->free_vbufs_lock);
}

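/* Pull all completed buffers off the virtqueue and collect them on
 * reclaim_list.
 */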
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

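/* Dequeue work for the control queue: reclaim completed vbuffers, run their
 * response callbacks, process fences and wake up waiters.
 */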
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);

		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

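/* Called with ctrlq.qlock held; the lock is dropped and reacquired while
 * waiting for free ring space, as annotated below.
 */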
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
		__releases(&vgdev->ctrlq.qlock)
		__acquires(&vgdev->ctrlq.qlock)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	int outcnt = 0, incnt = 0;
	int ret;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vbuf->data_size) {
		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
		sgs[outcnt + incnt] = &vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	if (!ret)
		ret = vq->num_free;
	return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	int rc;

	spin_lock(&vgdev->ctrlq.qlock);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_ctrl_hdr *hdr,
					       struct virtio_gpu_fence **fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int rc;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue.  If not,
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 3) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (fence)
		virtio_gpu_fence_emit(vgdev, hdr, fence);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (!ret)
		ret = vq->num_free;
	return ret;
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    uint32_t resource_id,
				    uint32_t format,
				    uint32_t width,
				    uint32_t height)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(width);
	cmd_p->height = cpu_to_le32(height);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
					   uint32_t resource_id)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = width;
	cmd_p->r.height = height;
	cmd_p->r.x = x;
	cmd_p->r.y = y;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

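/* Attach an array of guest memory entries as backing store for a resource.
 * The entry array is handed over to the vbuffer and freed once the command
 * has been processed (see free_vbuf).
 */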
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

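/* Response callbacks below run from the control queue dequeue work; they
 * copy the host's reply into driver state and wake up waiters on resp_wq.
 */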
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	int max_size;
	void *resp_buf;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	max_size = vgdev->capsets[idx].max_size;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_resource_create_3d *rc_3d,
				  struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	*cmd_p = *rc_3d;
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->hdr.flags = 0;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t resource_id, uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

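/* Submit a 3D command stream.  The data buffer is handed over to the vbuffer
 * and freed once the command has been processed (see free_vbuf).
 */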
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

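/* Build a mem entry array from the object's sg table and attach it as
 * backing store for the given resource id.
 */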
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     uint32_t resource_id,
			     struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si;

	if (!obj->pages) {
		int ret;

		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
		if (ret)
			return ret;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(obj->pages->nents,
			     sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
		ents[si].addr = cpu_to_le64(sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
					       ents, obj->pages->nents,
					       fence);
	obj->hw_res_handle = resource_id;
	return 0;
}

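/* Send the current cursor state for one output over the cursor queue. */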
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}