xref: /openbmc/linux/drivers/gpu/drm/qxl/qxl_object.c (revision c8ed9fc9)
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/io-mapping.h>
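
/*
 * TTM destroy callback: evict the BO's hardware surface (if any), drop
 * the BO from the device's GEM object list and free it.
 */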
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = to_qxl_bo(tbo);
	qdev = to_qxl(bo->tbo.base.dev);

	qxl_surface_evict(qdev, bo, false);
	WARN_ON_ONCE(bo->map_count > 0);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

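/* A BO belongs to qxl iff its TTM destroy callback is qxl_ttm_bo_destroy. */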
bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &qxl_ttm_bo_destroy;
}

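/*
 * Build the TTM placement list for a QXL_GEM_DOMAIN_* value.  Pinned BOs
 * get TTM_PL_FLAG_NO_EVICT, BOs of one page or less are placed top-down,
 * and surface BOs may fall back from surface memory (PRIV) to VRAM.
 * Unknown domains end up in system memory.
 */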
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
	u32 c = 0;
	u32 pflag = 0;
	unsigned int i;

	if (pinned)
		pflag |= TTM_PL_FLAG_NO_EVICT;
	if (qbo->tbo.base.size <= PAGE_SIZE)
		pflag |= TTM_PL_FLAG_TOPDOWN;

	qbo->placement.placement = qbo->placements;
	qbo->placement.busy_placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM)
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
	if (domain == QXL_GEM_DOMAIN_SURFACE) {
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag;
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
	}
	if (domain == QXL_GEM_DOMAIN_CPU)
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
	if (!c)
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	qbo->placement.num_placement = c;
	qbo->placement.num_busy_placement = c;
	for (i = 0; i < c; ++i) {
		qbo->placements[i].fpfn = 0;
		qbo->placements[i].lpfn = 0;
	}
}

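/* GEM object callbacks, mostly routed to the qxl GEM/PRIME and TTM helpers. */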
static const struct drm_gem_object_funcs qxl_object_funcs = {
	.free = qxl_gem_object_free,
	.open = qxl_gem_object_open,
	.close = qxl_gem_object_close,
	.pin = qxl_gem_prime_pin,
	.unpin = qxl_gem_prime_unpin,
	.get_sg_table = qxl_gem_prime_get_sg_table,
	.vmap = qxl_gem_prime_vmap,
	.vunmap = qxl_gem_prime_vunmap,
	.mmap = drm_gem_ttm_mmap,
	.print_info = drm_gem_ttm_print_info,
};

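/*
 * Allocate a qxl BO of @size bytes (rounded up to a page) in @domain,
 * initialize the embedded GEM object and hand the BO to TTM.  On success
 * the new object is returned through @bo_ptr.
 */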
int qxl_bo_create(struct qxl_device *qdev,
		  unsigned long size, bool kernel, bool pinned, u32 domain,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->tbo.base.funcs = &qxl_object_funcs;
	bo->type = domain;
	bo->pin_count = pinned ? 1 : 0;
	bo->surface_id = 0;
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain, pinned);

	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, !kernel, size,
			NULL, NULL, &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		/*
		 * ttm_bo_init() destroys the BO via qxl_ttm_bo_destroy()
		 * on failure, so no kfree() here.
		 */
		if (r != -ERESTARTSYS)
			dev_err(qdev->ddev.dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	*bo_ptr = bo;
	return 0;
}

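/*
 * Map the whole BO into the kernel address space.  Mappings are
 * refcounted through map_count, so nested calls just return the
 * cached kptr.
 */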
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		bo->map_count++;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	bo->map_count = 1;
	return 0;
}

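/*
 * Atomically map one page of the BO.  BOs in VRAM or surface memory are
 * mapped through the device's io_mapping; anything else falls back to a
 * (non-atomic) full kernel mapping via qxl_bo_kmap().
 */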
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	void *rptr;
	int ret;
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	ret = qxl_ttm_io_mem_reserve(bo->tbo.bdev, &bo->tbo.mem);
	if (ret)
		return NULL;

	return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_kmap(bo, &rptr);
	if (ret)
		return NULL;

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

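/* Drop one kernel-mapping reference; tear down the map on the last one. */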
void qxl_bo_kunmap(struct qxl_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->map_count--;
	if (bo->map_count > 0)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

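/* Counterpart to qxl_bo_kmap_atomic_page() for both mapping paths. */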
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
	    (bo->tbo.mem.mem_type != TTM_PL_PRIV))
		goto fallback;

	io_mapping_unmap_atomic(pmap);
	return;
fallback:
	qxl_bo_kunmap(bo);
}

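/* Drop the caller's reference and clear its pointer. */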
void qxl_bo_unref(struct qxl_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_put_unlocked(&(*bo)->tbo.base);
	*bo = NULL;
}

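/* Take an extra reference on the underlying GEM object. */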
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	drm_gem_object_get(&bo->tbo.base);
	return bo;
}

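/*
 * Pin helper; the caller must hold the BO reservation.  The first pin
 * revalidates the BO with TTM_PL_FLAG_NO_EVICT set, later pins only
 * bump pin_count.
 */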
static int __qxl_bo_pin(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->tbo.base.dev;
	int r;

	if (bo->pin_count) {
		bo->pin_count++;
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, bo->type, true);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0))
		bo->pin_count = 1;
	else
		dev_err(ddev->dev, "%p pin failed\n", bo);
	return r;
}

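/*
 * Unpin helper; the caller must hold the BO reservation.  When pin_count
 * drops to zero, TTM_PL_FLAG_NO_EVICT is cleared and the BO revalidated.
 */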
static int __qxl_bo_unpin(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->tbo.base.dev;
	int r, i;

	if (!bo->pin_count) {
		dev_warn(ddev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

/*
 * Reserve the BO before pinning the object.  If the BO is already
 * reserved, call the internal version, __qxl_bo_pin(), directly.
 */
int qxl_bo_pin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo, false);
	if (r)
		return r;

	r = __qxl_bo_pin(bo);
	qxl_bo_unreserve(bo);
	return r;
}

/*
 * Reserve the BO before unpinning the object.  If the BO is already
 * reserved, call the internal version, __qxl_bo_unpin(), directly.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo, false);
	if (r)
		return r;

	r = __qxl_bo_unpin(bo);
	qxl_bo_unreserve(bo);
	return r;
}

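/*
 * Driver teardown helper: warn about GEM objects userspace leaked and
 * forcibly release them.
 */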
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put_unlocked(&bo->tbo.base);
	}
}

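/* Thin wrappers around the TTM memory-manager setup and teardown. */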
int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

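/*
 * Surface BOs get their surface id (and the hardware surface itself)
 * allocated lazily, the first time this is called for them.
 */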
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo);
		if (ret)
			return ret;
	}
	return 0;
}

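/* Evict all BOs from surface memory / VRAM, respectively. */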
int qxl_surf_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
}

int qxl_vram_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
}