/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

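/*
 * Resource handles are allocated from an IDR so every object gets a unique,
 * non-zero id to identify it to the host; allocation starts at 1 so that 0
 * can be used to mean "no resource".
 */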
void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				uint32_t *resid)
{
	int handle;

	idr_preload(GFP_KERNEL);
	spin_lock(&vgdev->resource_idr_lock);
	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&vgdev->resource_idr_lock);
	idr_preload_end();
	*resid = handle;
}

void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	spin_lock(&vgdev->resource_idr_lock);
	idr_remove(&vgdev->resource_idr, id);
	spin_unlock(&vgdev->resource_idr_lock);
}

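/*
 * Virtqueue "buffer used" callbacks.  They run in interrupt context and just
 * kick the corresponding dequeue work; completed buffers are reclaimed by
 * the work handlers below.
 */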
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	schedule_work(&vgdev->cursorq.dequeue_work);
}

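/*
 * Preallocate one vbuffer per slot of the control and cursor rings, so the
 * submission paths never have to allocate memory.  Each vbuffer carries
 * inline space for a small command and a small response.
 */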
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_vbuffer *vbuf;
	int i, size, count = 0;
	void *ptr;

	INIT_LIST_HEAD(&vgdev->free_vbufs);
	spin_lock_init(&vgdev->free_vbufs_lock);
	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
	count += virtqueue_get_vring_size(vgdev->cursorq.vq);
	size = count * VBUFFER_SIZE;
	DRM_INFO("virtio vbuffers: %d bufs, %zdB each, %dkB total.\n",
		 count, VBUFFER_SIZE, size / 1024);

	vgdev->vbufs = kzalloc(size, GFP_KERNEL);
	if (!vgdev->vbufs)
		return -ENOMEM;

	for (i = 0, ptr = vgdev->vbufs;
	     i < count;
	     i++, ptr += VBUFFER_SIZE) {
		vbuf = ptr;
		list_add(&vbuf->list, &vgdev->free_vbufs);
	}
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_vbuffer *vbuf;
	int i, count = 0;

	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
	count += virtqueue_get_vring_size(vgdev->cursorq.vq);

	spin_lock(&vgdev->free_vbufs_lock);
	for (i = 0; i < count; i++) {
		if (WARN_ON(list_empty(&vgdev->free_vbufs))) {
			spin_unlock(&vgdev->free_vbufs_lock);
			return;
		}
		vbuf = list_first_entry(&vgdev->free_vbufs,
					struct virtio_gpu_vbuffer, list);
		list_del(&vbuf->list);
	}
	spin_unlock(&vgdev->free_vbufs_lock);
	kfree(vgdev->vbufs);
}

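/*
 * Take a vbuffer off the free list and set it up for one command.  Commands
 * up to MAX_INLINE_CMD_SIZE and responses up to MAX_INLINE_RESP_SIZE use the
 * inline storage that follows the vbuffer; larger responses must be passed
 * in via resp_buf by the caller.
 */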
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	spin_lock(&vgdev->free_vbufs_lock);
	BUG_ON(list_empty(&vgdev->free_vbufs));
	vbuf = list_first_entry(&vgdev->free_vbufs,
				struct virtio_gpu_vbuffer, list);
	list_del(&vbuf->list);
	spin_unlock(&vgdev->free_vbufs_lock);
	memset(vbuf, 0, VBUFFER_SIZE);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

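/* Put a vbuffer back on the free list and release any out-of-line buffers. */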
static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kfree(vbuf->data_buf);
	spin_lock(&vgdev->free_vbufs_lock);
	list_add(&vbuf->list, &vgdev->free_vbufs);
	spin_unlock(&vgdev->free_vbufs_lock);
}

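/*
 * Move all completed buffers from the virtqueue onto reclaim_list.  Called
 * with the queue lock held from the dequeue work handlers.
 */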
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

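/*
 * Control queue dequeue work: collect completed commands, log unexpected
 * response codes, track the highest fence id the host has signalled, run
 * per-command response callbacks and recycle the buffers.
 */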
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);

		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

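/*
 * Queue one command on the control ring.  The scatterlist layout is: command
 * header (driver to device), optional payload in vbuf->data_buf (driver to
 * device), optional response buffer (device to driver).  If the ring is full
 * the lock is dropped and we wait for the dequeue work to free up slots,
 * then retry.
 */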
static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	int outcnt = 0, incnt = 0;
	int ret;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vbuf->data_size) {
		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
		sgs[outcnt + incnt] = &vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	spin_lock(&vgdev->ctrlq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}
	spin_unlock(&vgdev->ctrlq.qlock);

	if (!ret)
		ret = vq->num_free;
	return ret;
}

static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (!ret)
		ret = vq->num_free;
	return ret;
}

/* Only create gem objects for userspace-visible, long-lived objects;
 * the queue objects could just use dma_alloc'ed pages instead.
 */

/* create a basic 2D resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    uint32_t resource_id,
				    uint32_t format,
				    uint32_t width,
				    uint32_t height)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(width);
	cmd_p->height = cpu_to_le32(height);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
					   uint32_t resource_id)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

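/*
 * Note: unlike the other command helpers, the rectangle parameters are
 * already little-endian (__le32) here and are copied through unconverted.
 * An optional fence can be attached so callers can wait for the transfer.
 */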
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = width;
	cmd_p->r.height = height;
	cmd_p->r.x = x;
	cmd_p->r.y = y;

	if (fence)
		virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	if (fence)
		virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

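/*
 * Response callback for GET_DISPLAY_INFO: cache the per-scanout modes,
 * clear the pending flag, wake anyone waiting for the reply and generate a
 * hotplug event so the connector state gets re-probed.
 */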
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

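/*
 * Attach an object's pages as backing store for a host resource: build an
 * array of address/length entries from the object's sg table and hand it to
 * the ATTACH_BACKING command as out-of-line payload.
 */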
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     uint32_t resource_id,
			     struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si;

	if (!obj->pages) {
		int ret;

		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
		if (ret)
			return ret;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(obj->pages->nents,
			     sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
		ents[si].addr = cpu_to_le64(sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
					       ents, obj->pages->nents,
					       fence);
	obj->hw_res_handle = resource_id;
	return 0;
}

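/*
 * Push the cached cursor state for one output (output->cursor) to the host
 * over the cursor queue.
 */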
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}