/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/virtgpu_drm.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include "virtgpu_drv.h"

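/* Convert a userspace box to the little-endian layout the host expects. */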
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

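/*
 * VIRTGPU_MAP: look up the mmap offset for a dumb buffer handle and
 * return it to userspace.
 */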
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

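/*
 * Reserve every buffer on @head and validate each one against its
 * placement; on failure, back off all reservations before returning.
 */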
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
					   struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
	if (ret != 0)
		return ret;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);
		ret = ttm_bo_validate(bo, &qobj->placement, false, false);
		if (ret) {
			ttm_eu_backoff_reservation(ticket, head);
			return ret;
		}
	}
	return 0;
}

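/* Drop the GEM reference held for every entry on the validate list. */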
static void virtio_gpu_unref_list(struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);

		drm_gem_object_unreference_unlocked(&qobj->gem_base);
	}
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial VIRTIO_GPUReleaseInfo struct (first XXX bytes).
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *drm_file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
	struct drm_gem_object *gobj;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_object *qobj;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct list_head validate_list;
	struct ttm_validate_buffer *buflist = NULL;
	int i;
	struct ww_acquire_ctx ticket;
	void *buf;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	INIT_LIST_HEAD(&validate_list);
	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		buflist = kvmalloc_array(exbuf->num_bo_handles,
					 sizeof(struct ttm_validate_buffer),
					 GFP_KERNEL | __GFP_ZERO);
		if (!bo_handles || !buflist) {
			kvfree(bo_handles);
			kvfree(buflist);
			return -ENOMEM;
		}

		user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			kvfree(bo_handles);
			kvfree(buflist);
			return ret;
		}

		for (i = 0; i < exbuf->num_bo_handles; i++) {
			gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
			if (!gobj) {
				/* drop the references taken so far */
				virtio_gpu_unref_list(&validate_list);
				kvfree(bo_handles);
				kvfree(buflist);
				return -ENOENT;
			}

			qobj = gem_to_virtio_gpu_obj(gobj);
			buflist[i].bo = &qobj->tbo;

			list_add(&buflist[i].head, &validate_list);
		}
		kvfree(bo_handles);
	}

	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
	if (ret)
		goto out_free;

	buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
			  exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unresv;
	}
	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, &fence);

	/* fence the buffers and release their reservations */
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);

	virtio_gpu_unref_list(&validate_list);
	kvfree(buflist);
	dma_fence_put(&fence->f);
	return 0;

out_unresv:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
	virtio_gpu_unref_list(&validate_list);
	kvfree(buflist);
	return ret;
}

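/*
 * VIRTGPU_GETPARAM: report device parameters to userspace; currently
 * only whether virgl 3D support is available.
 */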
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user((void __user *)(unsigned long)param->value,
			 &value, sizeof(int))) {
		return -EFAULT;
	}
	return 0;
}

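/*
 * VIRTGPU_RESOURCE_CREATE: create a host resource backed by a GEM
 * object.  Without virgl only plain 2D resources are accepted; with
 * virgl the full set of 3D parameters is forwarded to the host and the
 * backing-store attach is fenced.
 */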
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	int ret;
	uint32_t res_id;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	uint32_t size;
	struct list_head validate_list;
	struct ttm_validate_buffer mainbuf;
	struct virtio_gpu_fence *fence = NULL;
	struct ww_acquire_ctx ticket;
	struct virtio_gpu_resource_create_3d rc_3d;

	if (!vgdev->has_virgl_3d) {
		/* without virgl the host only handles simple 2D resources */
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	INIT_LIST_HEAD(&validate_list);
	memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

	virtio_gpu_resource_id_get(vgdev, &res_id);

	size = rc->size;

	/* allocate a single page size object */
	if (size == 0)
		size = PAGE_SIZE;

	qobj = virtio_gpu_alloc_object(dev, size, false, false);
	if (IS_ERR(qobj)) {
		ret = PTR_ERR(qobj);
		goto fail_id;
	}
	obj = &qobj->gem_base;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
					       rc->width, rc->height);

		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
	} else {
		/*
		 * Take an extra GEM reference, since
		 * virtio_gpu_unref_list() drops one per entry.
		 */
		drm_gem_object_reference(&qobj->gem_base);
		mainbuf.bo = &qobj->tbo;
		list_add(&mainbuf.head, &validate_list);

		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
		if (ret) {
			DRM_DEBUG("failed to validate\n");
			goto fail_unref;
		}

		rc_3d.resource_id = cpu_to_le32(res_id);
		rc_3d.target = cpu_to_le32(rc->target);
		rc_3d.format = cpu_to_le32(rc->format);
		rc_3d.bind = cpu_to_le32(rc->bind);
		rc_3d.width = cpu_to_le32(rc->width);
		rc_3d.height = cpu_to_le32(rc->height);
		rc_3d.depth = cpu_to_le32(rc->depth);
		rc_3d.array_size = cpu_to_le32(rc->array_size);
		rc_3d.last_level = cpu_to_le32(rc->last_level);
		rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
		rc_3d.flags = cpu_to_le32(rc->flags);

		virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
		if (ret) {
			ttm_eu_backoff_reservation(&ticket, &validate_list);
			goto fail_unref;
		}
		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
	}

	qobj->hw_res_handle = res_id;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		if (vgdev->has_virgl_3d) {
			virtio_gpu_unref_list(&validate_list);
			dma_fence_put(&fence->f);
		}
		return ret;
	}
	drm_gem_object_unreference_unlocked(obj);

	rc->res_handle = res_id; /* similar to a VM address */
	rc->bo_handle = handle;

	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		dma_fence_put(&fence->f);
	}
	return 0;
fail_unref:
	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		dma_fence_put(&fence->f);
	}
fail_id:
	virtio_gpu_resource_id_put(vgdev, res_id);
	return ret;
}

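/*
 * VIRTGPU_RESOURCE_INFO: report the size and host resource handle of a
 * GEM object.
 */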
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->gem_base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

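/*
 * VIRTGPU_TRANSFER_FROM_HOST: queue a fenced transfer from the host
 * resource into the guest buffer object (virgl only).
 */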
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;
	struct virtio_gpu_box box;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	gobj = drm_gem_object_lookup(file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, qobj->hw_res_handle,
		 vfpriv->ctx_id, offset, args->level,
		 &box, &fence);
	reservation_object_add_excl_fence(qobj->tbo.resv,
					  &fence->f);

	dma_fence_put(&fence->f);
out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

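/*
 * VIRTGPU_TRANSFER_TO_HOST: queue a transfer from the guest buffer
 * object to the host resource; 2D transfers are unfenced, 3D transfers
 * attach an exclusive fence to the reservation object.
 */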
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_box box;
	int ret;
	u32 offset = args->offset;

	gobj = drm_gem_object_lookup(file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, qobj->hw_res_handle, offset,
			 box.w, box.h, box.x, box.y, NULL);
	} else {
		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev, qobj->hw_res_handle,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &box, &fence);
		reservation_object_add_excl_fence(qobj->tbo.resv,
						  &fence->f);
		dma_fence_put(&fence->f);
	}

out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

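/*
 * VIRTGPU_WAIT: wait until a buffer object is idle, or just poll its
 * busy state when VIRTGPU_WAIT_NOWAIT is set.
 */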
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	int ret;
	bool nowait = false;

	gobj = drm_gem_object_lookup(file, args->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	if (args->flags & VIRTGPU_WAIT_NOWAIT)
		nowait = true;
	ret = virtio_gpu_object_wait(qobj, nowait);

	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

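/*
 * VIRTGPU_GET_CAPS: copy a capability set to userspace, fetching it
 * from the host first if the id/version pair is not cached yet.
 */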
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	int size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	size = vgdev->capsets[found_valid].max_size;
	if (args->size > size) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			ptr = cache_ent->caps_cache;
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);

	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	/* the host never answered: don't hand back stale cache contents */
	if (!ret)
		return -EBUSY;

	ptr = cache_ent->caps_cache;

copy_exit:
	if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
		return -EFAULT;

	return 0;
}
529 
530 struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
531 	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
532 			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
533 
534 	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
535 			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
536 
537 	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
538 			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
539 
540 	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
541 			  virtio_gpu_resource_create_ioctl,
542 			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
543 
544 	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
545 			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
546 
	/*
	 * Make the transfers async to the main ring?  Not sure whether we
	 * can thread these in the underlying GL.
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};