/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/ttm/ttm_execbuf_util.h>

#include "virtgpu_drv.h"

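/*
 * Resource-id allocation.  The ida-based allocator is disabled for now:
 * virglrenderer cannot cope with ids being reused, so the driver hands
 * out ids from a monotonically increasing counter instead (see the
 * FIXME below).  The "+ 1" keeps id 0 unused, which virtio-gpu treats
 * as "no resource".  Note the counter increment is not serialized, so
 * concurrent allocations could in principle race; with the ida branch
 * re-enabled that problem goes away.  virtio_gpu_resource_id_put() is
 * a no-op while the hack is in place.
 */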
static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				       uint32_t *resid)
{
#if 0
	int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

	if (handle < 0)
		return handle;
#else
	static int handle;

	/*
	 * FIXME: dirty hack to avoid re-using IDs, virglrenderer
	 * can't deal with that.  Needs fixing in virglrenderer, also
	 * should figure a better way to handle that in the guest.
	 */
	handle++;
#endif

	*resid = handle + 1;
	return 0;
}

static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
#if 0
	ida_free(&vgdev->resource_ida, id - 1);
#endif
}

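/*
 * TTM destroy callback, run when the last reference to the buffer
 * object is dropped: tell the host to unref the resource (if it was
 * ever created), free the shadow sg_table and kernel mapping, release
 * the GEM object, and give back the resource id.
 */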
static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct virtio_gpu_object *bo;
	struct virtio_gpu_device *vgdev;

	bo = container_of(tbo, struct virtio_gpu_object, tbo);
	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;

	if (bo->created)
		virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
	if (bo->pages)
		virtio_gpu_object_free_sg_table(bo);
	if (bo->vmap)
		virtio_gpu_object_kunmap(bo);
	drm_gem_object_release(&bo->gem_base);
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	kfree(bo);
}

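/*
 * virtio-gpu objects are backed by guest system memory only, so a
 * single pinned TT placement (TTM_PL_FLAG_TT | TTM_PL_FLAG_NO_EVICT,
 * any caching) serves as both the normal and the busy placement.
 */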
static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo)
{
	u32 c = 1;

	vgbo->placement.placement = &vgbo->placement_code;
	vgbo->placement.busy_placement = &vgbo->placement_code;
	vgbo->placement_code.fpfn = 0;
	vgbo->placement_code.lpfn = 0;
	vgbo->placement_code.flags =
		TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT |
		TTM_PL_FLAG_NO_EVICT;
	vgbo->placement.num_placement = c;
	vgbo->placement.num_busy_placement = c;
}

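/*
 * Create a GPU object: reserve a resource id, set up the GEM/TTM
 * object, and submit the resource-create command (2d or 3d) to the
 * host.  If @fence is non-NULL the object is additionally fenced, so
 * it stays reserved until the host has processed the create command.
 */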
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo;
	size_t acc_size;
	int ret;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, params->size,
				       sizeof(struct virtio_gpu_object));

	bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0) {
		kfree(bo);
		return ret;
	}
	params->size = roundup(params->size, PAGE_SIZE);
	ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, params->size);
	if (ret != 0) {
		virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
		kfree(bo);
		return ret;
	}
	bo->dumb = params->dumb;

	if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params, fence);
	}

	virtio_gpu_init_ttm_placement(bo);
	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
			  ttm_bo_type_device, &bo->placement, 0,
			  true, acc_size, NULL, NULL,
			  &virtio_gpu_ttm_bo_destroy);
	/* on failure, ttm_bo_init() has already invoked the destroy callback */
	if (ret != 0)
		return ret;

	if (fence) {
		struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
		struct list_head validate_list;
		struct ttm_validate_buffer mainbuf;
		struct ww_acquire_ctx ticket;
		unsigned long irq_flags;
		bool signaled;

		INIT_LIST_HEAD(&validate_list);
		memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

		/* take an extra gem reference; virtio_gpu_unref_list() drops it */
		drm_gem_object_get(&bo->gem_base);
		mainbuf.bo = &bo->tbo;
		list_add(&mainbuf.head, &validate_list);

		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
		if (ret == 0) {
			spin_lock_irqsave(&drv->lock, irq_flags);
			signaled = virtio_fence_signaled(&fence->f);
			if (!signaled)
				/* virtio create command still in flight */
				ttm_eu_fence_buffer_objects(&ticket, &validate_list,
							    &fence->f);
			spin_unlock_irqrestore(&drv->lock, irq_flags);
			if (signaled)
				/* virtio create command finished */
				ttm_eu_backoff_reservation(&ticket, &validate_list);
		}
		virtio_gpu_unref_list(&validate_list);
	}

	*bo_ptr = bo;
	return 0;
}

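/* Drop the cached kernel mapping set up by virtio_gpu_object_kmap(). */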
void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo)
{
	bo->vmap = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

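/*
 * Map the whole object into kernel address space and cache the address
 * in bo->vmap.  Objects are TT-placed (ordinary guest pages), so the
 * mapping can never be iomem; is_iomem is deliberately ignored.
 */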
int virtio_gpu_object_kmap(struct virtio_gpu_object *bo)
{
	bool is_iomem;
	int r;

	WARN_ON(bo->vmap);

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	return 0;
}

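/*
 * Build a scatter/gather table over the object's backing pages so the
 * backing store can be handed to the host (resource attach-backing).
 * Segment length is capped at the virtio DMA size limit.  Returns
 * immediately if the table has already been built.
 */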
int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
				   struct virtio_gpu_object *bo)
{
	int ret;
	struct page **pages = bo->tbo.ttm->pages;
	int nr_pages = bo->tbo.num_pages;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	size_t max_segment;

	/* nothing to do if the sg_table already exists */
	if (bo->pages)
		return 0;

	if (bo->tbo.ttm->state == tt_unpopulated)
		bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx);
	bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!bo->pages)
		return -ENOMEM;

	max_segment = virtio_max_dma_size(qdev->vdev);
	max_segment &= PAGE_MASK;
	if (max_segment > SCATTERLIST_MAX_SEGMENT)
		max_segment = SCATTERLIST_MAX_SEGMENT;
	ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
					  nr_pages << PAGE_SHIFT,
					  max_segment, GFP_KERNEL);
	if (ret) {
		kfree(bo->pages);
		bo->pages = NULL;
		return ret;
	}
	return 0;
}

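/* Tear down the sg_table built by virtio_gpu_object_get_sg_table(). */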
void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo)
{
	sg_free_table(bo->pages);
	kfree(bo->pages);
	bo->pages = NULL;
}

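/*
 * Wait (interruptibly) for all pending GPU activity on the object to
 * finish; with @no_wait set this only polls and returns -EBUSY if the
 * object is still busy.
 */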
int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
	if (unlikely(r != 0))
		return r;
	r = ttm_bo_wait(&bo->tbo, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}