/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <drm/virtgpu_drm.h>
#include "ttm/ttm_execbuf_util.h"

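/* Convert a box from the userspace layout to the little-endian wire format. */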
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

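/*
 * VIRTGPU_MAP: look up the object for a GEM handle and return the fake
 * mmap offset userspace should pass to mmap() to map it.
 */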
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

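/*
 * Reserve all buffers on @head under a single ww_acquire ticket, then
 * validate each one against its requested placement.  On failure any
 * reservations taken so far are backed off before returning.
 */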
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
					   struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
	if (ret != 0)
		return ret;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);
		ret = ttm_bo_validate(bo, &qobj->placement, false, false);
		if (ret) {
			ttm_eu_backoff_reservation(ticket, head);
			return ret;
		}
	}
	return 0;
}

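/* Drop the GEM references taken when the buffer handles were looked up. */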
static void virtio_gpu_unref_list(struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);

		drm_gem_object_unreference_unlocked(&qobj->gem_base);
	}
}

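/*
 * Submit a command stream to the host: look up and validate the buffer
 * objects the commands reference, copy the command buffer from userspace,
 * queue it with a fence, and fence the validated buffers so they stay
 * busy until the host has consumed the commands.
 */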
static int virtio_gpu_execbuffer(struct drm_device *dev,
				 struct drm_virtgpu_execbuffer *exbuf,
				 struct drm_file *drm_file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
	struct drm_gem_object *gobj;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_object *qobj;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct list_head validate_list;
	struct ttm_validate_buffer *buflist = NULL;
	int i;
	struct ww_acquire_ctx ticket;
	void *buf;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	INIT_LIST_HEAD(&validate_list);
	if (exbuf->num_bo_handles) {
		bo_handles = drm_malloc_ab(exbuf->num_bo_handles,
					   sizeof(uint32_t));
		buflist = drm_calloc_large(exbuf->num_bo_handles,
					   sizeof(struct ttm_validate_buffer));
		if (!bo_handles || !buflist) {
			drm_free_large(bo_handles);
			drm_free_large(buflist);
			return -ENOMEM;
		}

		user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			drm_free_large(bo_handles);
			drm_free_large(buflist);
			return -EFAULT;
		}

		for (i = 0; i < exbuf->num_bo_handles; i++) {
			gobj = drm_gem_object_lookup(dev,
						     drm_file, bo_handles[i]);
			if (!gobj) {
				/* drop the references taken so far */
				virtio_gpu_unref_list(&validate_list);
				drm_free_large(bo_handles);
				drm_free_large(buflist);
				return -ENOENT;
			}

			qobj = gem_to_virtio_gpu_obj(gobj);
			buflist[i].bo = &qobj->tbo;

			list_add(&buflist[i].head, &validate_list);
		}
		drm_free_large(bo_handles);
	}

	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
	if (ret)
		goto out_free;

	buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
			  exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unresv;
	}
	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, &fence);

	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);

	/* the buffers are fenced now; drop our references */
	virtio_gpu_unref_list(&validate_list);
	drm_free_large(buflist);
	fence_put(&fence->f);
	return 0;

out_unresv:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
	virtio_gpu_unref_list(&validate_list);
	drm_free_large(buflist);
	return ret;
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial VIRTIO_GPUReleaseInfo struct (first XXX bytes).
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv)
{
	struct drm_virtgpu_execbuffer *execbuffer = data;

	return virtio_gpu_execbuffer(dev, execbuffer, file_priv);
}

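/* VIRTGPU_GETPARAM: report driver parameters; currently only 3D support. */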
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user((void __user *)(unsigned long)param->value,
			 &value, sizeof(int)))
		return -EFAULT;

	return 0;
}

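/*
 * VIRTGPU_RESOURCE_CREATE: create a host resource backed by a guest GEM
 * object.  Without virgl only plain single-layer 2D resources are allowed;
 * with virgl the full set of 3D parameters is forwarded to the host and
 * the backing-store attach is fenced.
 */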
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	int ret;
	uint32_t res_id;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	uint32_t size;
	struct list_head validate_list;
	struct ttm_validate_buffer mainbuf;
	struct virtio_gpu_fence *fence = NULL;
	struct ww_acquire_ctx ticket;
	struct virtio_gpu_resource_create_3d rc_3d;

	if (!vgdev->has_virgl_3d) {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	INIT_LIST_HEAD(&validate_list);
	memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

	virtio_gpu_resource_id_get(vgdev, &res_id);

	size = rc->size;

	/* allocate a single-page object if no size was given */
	if (size == 0)
		size = PAGE_SIZE;

	qobj = virtio_gpu_alloc_object(dev, size, false, false);
	if (IS_ERR(qobj)) {
		ret = PTR_ERR(qobj);
		goto fail_id;
	}
	obj = &qobj->gem_base;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
					       rc->width, rc->height);

		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
	} else {
		/* use a gem reference since unref list undoes them */
		drm_gem_object_reference(&qobj->gem_base);
		mainbuf.bo = &qobj->tbo;
		list_add(&mainbuf.head, &validate_list);

		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
		if (ret) {
			DRM_DEBUG("failed to validate\n");
			goto fail_unref;
		}

		rc_3d.resource_id = cpu_to_le32(res_id);
		rc_3d.target = cpu_to_le32(rc->target);
		rc_3d.format = cpu_to_le32(rc->format);
		rc_3d.bind = cpu_to_le32(rc->bind);
		rc_3d.width = cpu_to_le32(rc->width);
		rc_3d.height = cpu_to_le32(rc->height);
		rc_3d.depth = cpu_to_le32(rc->depth);
		rc_3d.array_size = cpu_to_le32(rc->array_size);
		rc_3d.last_level = cpu_to_le32(rc->last_level);
		rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
		rc_3d.flags = cpu_to_le32(rc->flags);

		virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
		if (ret) {
			ttm_eu_backoff_reservation(&ticket, &validate_list);
			goto fail_unref;
		}
		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
	}

	qobj->hw_res_handle = res_id;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		if (vgdev->has_virgl_3d) {
			virtio_gpu_unref_list(&validate_list);
			fence_put(&fence->f);
		}
		return ret;
	}
	drm_gem_object_unreference_unlocked(obj);

	rc->res_handle = res_id; /* similar to a VM address */
	rc->bo_handle = handle;

	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		fence_put(&fence->f);
	}
	return 0;

fail_unref:
	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		/* fence is still NULL if validation failed before attach */
		if (fence)
			fence_put(&fence->f);
	}
fail_id:
	virtio_gpu_resource_id_put(vgdev, res_id);
	return ret;
}

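/* VIRTGPU_RESOURCE_INFO: report the size and host resource id of a BO. */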
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(dev, file_priv, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->gem_base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

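/*
 * VIRTGPU_TRANSFER_FROM_HOST: copy a region of a host resource back into
 * its guest backing store, fenced against other users of the BO.
 */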
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;
	struct virtio_gpu_box box;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, qobj->hw_res_handle,
		 vfpriv->ctx_id, offset, args->level,
		 &box, &fence);
	reservation_object_add_excl_fence(qobj->tbo.resv,
					  &fence->f);

	fence_put(&fence->f);
out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

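/*
 * VIRTGPU_TRANSFER_TO_HOST: copy a region of the guest backing store to
 * the host resource.  2D transfers are unfenced; 3D transfers attach a
 * fence to the BO.
 */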
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev,
					     void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_box box;
	int ret;
	u32 offset = args->offset;

	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, qobj->hw_res_handle, offset,
			 box.w, box.h, box.x, box.y, NULL);
	} else {
		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev, qobj->hw_res_handle,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &box, &fence);
		reservation_object_add_excl_fence(qobj->tbo.resv,
						  &fence->f);
		fence_put(&fence->f);
	}

out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

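/*
 * VIRTGPU_WAIT: wait until a BO is idle, or just poll its busy state
 * when VIRTGPU_WAIT_NOWAIT is set.
 */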
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	int ret;
	bool nowait = false;

	gobj = drm_gem_object_lookup(dev, file, args->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	if (args->flags & VIRTGPU_WAIT_NOWAIT)
		nowait = true;
	ret = virtio_gpu_object_wait(qobj, nowait);

	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

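/*
 * VIRTGPU_GET_CAPS: copy a capability set to userspace, serving it from
 * the local cache when possible and querying the host otherwise.
 */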
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	int size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id &&
		    vgdev->capsets[i].max_version >= args->cap_set_ver) {
			found_valid = i;
			break;
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	size = vgdev->capsets[found_valid].max_size;
	if (args->size > size) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			ptr = cache_ent->caps_cache;
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);

	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	ptr = cache_ent->caps_cache;

copy_exit:
	/* only copy as many bytes as userspace provided room for */
	if (copy_to_user((void __user *)(unsigned long)args->addr, ptr,
			 args->size))
		return -EFAULT;

	return 0;
}

541 
542 struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
543 	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
544 			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
545 
546 	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
547 			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
548 
549 	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
550 			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
551 
552 	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
553 			  virtio_gpu_resource_create_ioctl,
554 			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
555 
556 	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
557 			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
558 
559 	/* make transfer async to the main ring? - no sure, can we
560 	   thread these in the underlying GL */
561 	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
562 			  virtio_gpu_transfer_from_host_ioctl,
563 			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
564 	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
565 			  virtio_gpu_transfer_to_host_ioctl,
566 			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
567 
568 	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
569 			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
570 
571 	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
572 			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
573 };
574