/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

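/*
 * Resource-id allocation: ids are handed out from a driver-global IDR so
 * every host-visible resource gets a unique, non-zero handle.  The IDR is
 * preloaded outside the spinlock so the GFP_NOWAIT allocation taken under
 * the lock does not fail needlessly.
 */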
void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				uint32_t *resid)
{
	int handle;

	idr_preload(GFP_KERNEL);
	spin_lock(&vgdev->resource_idr_lock);
	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&vgdev->resource_idr_lock);
	idr_preload_end();
	*resid = handle;
}

void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	spin_lock(&vgdev->resource_idr_lock);
	idr_remove(&vgdev->resource_idr, id);
	spin_unlock(&vgdev->resource_idr_lock);
}

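/*
 * Virtqueue "ack" callbacks.  These run in virtio interrupt context, so
 * they only kick the per-queue dequeue work; the actual buffer reclaim
 * happens later in process context.
 */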
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

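/*
 * A vbuffer is a single slab object: the struct itself, followed by
 * MAX_INLINE_CMD_SIZE bytes for the command and MAX_INLINE_RESP_SIZE
 * bytes for a small response.  Responses larger than the inline space
 * must be supplied by the caller via resp_buf.
 */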
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);
	memset(vbuf, 0, VBUFFER_SIZE);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

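/* Collect all completed buffers from the virtqueue onto reclaim_list. */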
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

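/*
 * Control-queue dequeue work: reclaim completed buffers, run their
 * response callbacks and signal fences up to the highest fence id seen
 * in the responses.  Runs in process context via schedule_work().
 */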
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);

		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

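/*
 * Queue one control command.  Up to three scatterlist entries are used:
 * the command itself (out), an optional data buffer (out) and an optional
 * response buffer (in).  If the virtqueue is full, the ctrlq lock is
 * dropped and we sleep until the host has made room.
 */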
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
		__releases(&vgdev->ctrlq.qlock)
		__acquires(&vgdev->ctrlq.qlock)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	int outcnt = 0, incnt = 0;
	int ret;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vbuf->data_size) {
		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
		sgs[outcnt + incnt] = &vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue,
			   vq->num_free >= outcnt + incnt);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	if (!ret)
		ret = vq->num_free;
	return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	int rc;

	spin_lock(&vgdev->ctrlq.qlock);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

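/*
 * Like virtio_gpu_queue_ctrl_buffer(), but the fence is emitted under the
 * queue lock right before the buffer is added, so fence ids reach the
 * ring in order.
 */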
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_ctrl_hdr *hdr,
					       struct virtio_gpu_fence **fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int rc;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue.  If not,
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 3) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (fence)
		virtio_gpu_fence_emit(vgdev, hdr, fence);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

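/* Cursor commands are fire-and-forget: a single out sg, no response. */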
static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (!ret)
		ret = vq->num_free;
	return ret;
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

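/*
 * The virtio_gpu_cmd_*() helpers below all follow the same pattern:
 * allocate an inline command buffer, fill in the little-endian command
 * struct, then queue it (fenced or unfenced).  A minimal sketch of a new
 * command emitter would look like the following; virtio_gpu_foo and
 * VIRTIO_GPU_CMD_FOO are hypothetical names used only for illustration,
 * not part of the virtio-gpu protocol:
 *
 *	struct virtio_gpu_foo *cmd_p;
 *	struct virtio_gpu_vbuffer *vbuf;
 *
 *	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 *	memset(cmd_p, 0, sizeof(*cmd_p));
 *	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_FOO);
 *	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 */
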
/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    uint32_t resource_id,
				    uint32_t format,
				    uint32_t width,
				    uint32_t height)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(width);
	cmd_p->height = cpu_to_le32(height);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
						  uint32_t resource_id,
						  struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = width;
	cmd_p->r.height = height;
	cmd_p->r.x = x;
	cmd_p->r.y = y;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

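/*
 * Response callbacks, run from the control-queue dequeue work.  The
 * vbuffer and its response buffer are freed right after the callback
 * returns, so results are copied into driver state under
 * display_info_lock before waking up waiters on resp_wq.
 */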
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

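/*
 * Query commands: the response buffers are kzalloc'ed here and, being
 * larger than MAX_INLINE_RESP_SIZE, are kfree'd by free_vbuf() once the
 * host reply has been processed.
 */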
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *resp_buf;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_resource_create_3d *rc_3d,
				  struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	*cmd_p = *rc_3d;
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->hdr.flags = 0;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t resource_id, uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

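/*
 * Attach guest pages as the backing store of a host resource.  With the
 * virtio "IOMMU bypass" quirk the host is handed physical addresses
 * directly; otherwise the scatterlist is DMA-mapped and bus addresses
 * are sent instead.
 */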
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     uint32_t resource_id,
			     struct virtio_gpu_fence **fence)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si, nents;

	if (!obj->pages) {
		int ret;

		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
		if (ret)
			return ret;
	}

	if (use_dma_api) {
		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					 obj->pages->sgl, obj->pages->nents,
					 DMA_TO_DEVICE);
		nents = obj->mapped;
	} else {
		nents = obj->pages->nents;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, nents, si) {
		ents[si].addr = cpu_to_le64(use_dma_api
					    ? sg_dma_address(sg)
					    : sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
					       ents, nents,
					       fence);
	obj->hw_res_handle = resource_id;
	return 0;
}

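/*
 * Detach the backing store again.  When DMA mappings are in use we must
 * wait (via a fence) for the host to stop using the pages before they
 * can be unmapped.
 */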
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_fence *fence;

	if (use_dma_api && obj->mapped) {
		/* detach backing and wait for the host to process it ... */
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
		dma_fence_wait(&fence->f, true);
		dma_fence_put(&fence->f);

		/* ... then tear down iommu mappings */
		dma_unmap_sg(vgdev->vdev->dev.parent,
			     obj->pages->sgl, obj->mapped,
			     DMA_TO_DEVICE);
		obj->mapped = 0;
	} else {
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
	}
}

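/* Push the current cursor state for one output to the host. */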
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}