/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/virtgpu_drm.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include "virtgpu_drv.h"

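/*
 * Convert a userspace-supplied 3D box into the little-endian layout the
 * host expects (virtio-gpu wire fields are always little endian, hence
 * the cpu_to_le32() on every member).
 */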
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

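/*
 * VIRTGPU_MAP: look up the fake mmap offset of a dumb buffer so that
 * userspace can mmap() the object through the DRM file descriptor.
 */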
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

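/*
 * Reserve every buffer on @head (the ww-acquire ticket keeps the
 * multi-object reservation deadlock free) and validate each one into
 * its preferred placement.  On failure all reservations taken so far
 * are backed off.  The ttm_operation_ctx of { false, false } makes the
 * validation non-interruptible and allows it to wait for the GPU.
 */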
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
					   struct list_head *head)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
	if (ret != 0)
		return ret;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);
		ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
		if (ret) {
			ttm_eu_backoff_reservation(ticket, head);
			return ret;
		}
	}
	return 0;
}

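/* Drop the GEM references taken when the validate list was built. */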
static void virtio_gpu_unref_list(struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);

		drm_gem_object_put_unlocked(&qobj->gem_base);
	}
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial VIRTIO_GPUReleaseInfo struct (first XXX bytes).
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *drm_file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
	struct drm_gem_object *gobj;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_object *qobj;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct list_head validate_list;
	struct ttm_validate_buffer *buflist = NULL;
	int i;
	struct ww_acquire_ctx ticket;
	void *buf;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	INIT_LIST_HEAD(&validate_list);
	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		buflist = kvmalloc_array(exbuf->num_bo_handles,
					 sizeof(struct ttm_validate_buffer),
					 GFP_KERNEL | __GFP_ZERO);
		if (!bo_handles || !buflist) {
			kvfree(bo_handles);
			kvfree(buflist);
			return -ENOMEM;
		}

		user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			kvfree(bo_handles);
			kvfree(buflist);
			return -EFAULT;
		}

		for (i = 0; i < exbuf->num_bo_handles; i++) {
			gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
			if (!gobj) {
				/* drop the references taken for earlier handles */
				virtio_gpu_unref_list(&validate_list);
				kvfree(bo_handles);
				kvfree(buflist);
				return -ENOENT;
			}

			qobj = gem_to_virtio_gpu_obj(gobj);
			buflist[i].bo = &qobj->tbo;

			list_add(&buflist[i].head, &validate_list);
		}
		kvfree(bo_handles);
	}

	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
	if (ret)
		goto out_free;

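	/*
	 * Copy the command stream from userspace; ownership of the buffer
	 * passes to virtio_gpu_cmd_submit(), which is expected to free it
	 * once the host has consumed the command.
	 */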
	buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
			  exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unresv;
	}
	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, &fence);

	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);

	/* fence the command bo */
	virtio_gpu_unref_list(&validate_list);
	kvfree(buflist);
	dma_fence_put(&fence->f);
	return 0;

out_unresv:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
	virtio_gpu_unref_list(&validate_list);
	kvfree(buflist);
	return ret;
}

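/*
 * VIRTGPU_GETPARAM: report driver capabilities to userspace (currently
 * whether the host supports virgl 3D, and that the capset query fix is
 * present).
 */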
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user((void __user *)(unsigned long)param->value,
			 &value, sizeof(int)))
		return -EFAULT;

	return 0;
}

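/*
 * VIRTGPU_RESOURCE_CREATE: allocate a host resource plus the guest GEM
 * object backing it.  With virgl 3D the object is validated and fenced
 * so the host sees the attached backing store before it is used.
 */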
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	int ret;
	uint32_t res_id;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	uint32_t size;
	struct list_head validate_list;
	struct ttm_validate_buffer mainbuf;
	struct virtio_gpu_fence *fence = NULL;
	struct ww_acquire_ctx ticket;
	struct virtio_gpu_resource_create_3d rc_3d;

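	/*
	 * Without virgl 3D only plain 2D resources can be created: a
	 * single layer, level and sample, and target 2, which corresponds
	 * to a 2D texture in the gallium-style target enum these
	 * parameters follow.
	 */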
	if (!vgdev->has_virgl_3d) {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	INIT_LIST_HEAD(&validate_list);
	memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

	virtio_gpu_resource_id_get(vgdev, &res_id);

	size = rc->size;

	/* allocate a single page size object */
	if (size == 0)
		size = PAGE_SIZE;

	qobj = virtio_gpu_alloc_object(dev, size, false, false);
	if (IS_ERR(qobj)) {
		ret = PTR_ERR(qobj);
		goto fail_id;
	}
	obj = &qobj->gem_base;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
					       rc->width, rc->height);

		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
	} else {
		/* use a gem reference since unref list undoes them */
		drm_gem_object_get(&qobj->gem_base);
		mainbuf.bo = &qobj->tbo;
		list_add(&mainbuf.head, &validate_list);

		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
		if (ret) {
			DRM_DEBUG("failed to validate\n");
			goto fail_unref;
		}

		rc_3d.resource_id = cpu_to_le32(res_id);
		rc_3d.target = cpu_to_le32(rc->target);
		rc_3d.format = cpu_to_le32(rc->format);
		rc_3d.bind = cpu_to_le32(rc->bind);
		rc_3d.width = cpu_to_le32(rc->width);
		rc_3d.height = cpu_to_le32(rc->height);
		rc_3d.depth = cpu_to_le32(rc->depth);
		rc_3d.array_size = cpu_to_le32(rc->array_size);
		rc_3d.last_level = cpu_to_le32(rc->last_level);
		rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
		rc_3d.flags = cpu_to_le32(rc->flags);

		virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
		if (ret) {
			ttm_eu_backoff_reservation(&ticket, &validate_list);
			goto fail_unref;
		}
		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
	}

	qobj->hw_res_handle = res_id;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		if (vgdev->has_virgl_3d) {
			virtio_gpu_unref_list(&validate_list);
			dma_fence_put(&fence->f);
		}
		return ret;
	}
	drm_gem_object_put_unlocked(obj);

	rc->res_handle = res_id; /* similar to a VM address */
	rc->bo_handle = handle;

	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		dma_fence_put(&fence->f);
	}
	return 0;
fail_unref:
	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		dma_fence_put(&fence->f);
	}
fail_id:
	virtio_gpu_resource_id_put(vgdev, res_id);
	return ret;
}

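/*
 * VIRTGPU_RESOURCE_INFO: return the size and host resource handle of a
 * GEM object identified by its handle.
 */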
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->gem_base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

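/*
 * VIRTGPU_TRANSFER_FROM_HOST: read a region of a host resource back
 * into its guest backing store (virgl 3D only).  The exclusive fence
 * added to the reservation object keeps other users off the buffer
 * until the transfer completes.
 */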
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct ttm_operation_ctx ctx = { true, false };
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;
	struct virtio_gpu_box box;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	gobj = drm_gem_object_lookup(file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, qobj->hw_res_handle,
		 vfpriv->ctx_id, offset, args->level,
		 &box, &fence);
	reservation_object_add_excl_fence(qobj->tbo.resv,
					  &fence->f);

	dma_fence_put(&fence->f);
out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_put_unlocked(gobj);
	return ret;
}

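/*
 * VIRTGPU_TRANSFER_TO_HOST: push a region of the guest backing store to
 * the host resource.  The 2D variant only honours width/height/x/y and
 * needs no fence; the 3D variant is fenced like the readback path.
 */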
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct ttm_operation_ctx ctx = { true, false };
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_box box;
	int ret;
	u32 offset = args->offset;

	gobj = drm_gem_object_lookup(file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, qobj->hw_res_handle, offset,
			 box.w, box.h, box.x, box.y, NULL);
	} else {
		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev, qobj->hw_res_handle,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &box, &fence);
		reservation_object_add_excl_fence(qobj->tbo.resv,
						  &fence->f);
		dma_fence_put(&fence->f);
	}

out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_put_unlocked(gobj);
	return ret;
}

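/*
 * VIRTGPU_WAIT: block (or just poll, with VIRTGPU_WAIT_NOWAIT) until
 * all pending operations on the buffer object have finished.
 */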
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	int ret;
	bool nowait = false;

	gobj = drm_gem_object_lookup(file, args->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	if (args->flags & VIRTGPU_WAIT_NOWAIT)
		nowait = true;
	ret = virtio_gpu_object_wait(qobj, nowait);

	drm_gem_object_put_unlocked(gobj);
	return ret;
}

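/*
 * VIRTGPU_GET_CAPS: copy a capability set to userspace, serving it from
 * the local cache when possible and otherwise fetching it from the host
 * (waiting up to five seconds for the response).
 */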
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned int size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the
	 * guest caps size
	 */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			ptr = cache_ent->caps_cache;
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);

	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	ptr = cache_ent->caps_cache;

copy_exit:
	if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
		return -EFAULT;

	return 0;
}

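/* Ioctl table; every entry is also available to render nodes. */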
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure; can we
	 * thread these in the underlying GL?
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
};