xref: /openbmc/linux/drivers/gpu/drm/qxl/qxl_object.c (revision 176f011b)
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/io-mapping.h>

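/*
 * TTM destroy callback, run when the last reference to the buffer
 * object is dropped: evict the backing surface, unlink the BO from
 * the device's GEM object list and free it.
 */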
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = to_qxl_bo(tbo);
	qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;

	qxl_surface_evict(qdev, bo, false);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

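/* True if the TTM BO was created by this driver, i.e. is a qxl_bo. */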
bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &qxl_ttm_bo_destroy;
}

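/*
 * Build the TTM placement list for a QXL_GEM_DOMAIN_* value.  @pinned
 * adds TTM_PL_FLAG_NO_EVICT so the BO cannot be moved out of the
 * domain; an unrecognized domain falls back to system memory.
 */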
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
	u32 c = 0;
	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
	unsigned int i;

	qbo->placement.placement = qbo->placements;
	qbo->placement.busy_placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM)
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
	if (domain == QXL_GEM_DOMAIN_SURFACE)
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag;
	if (domain == QXL_GEM_DOMAIN_CPU)
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
	if (!c)
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	qbo->placement.num_placement = c;
	qbo->placement.num_busy_placement = c;
	for (i = 0; i < c; ++i) {
		qbo->placements[i].fpfn = 0;
		qbo->placements[i].lpfn = 0;
	}
}

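/*
 * Allocate and initialize a qxl_bo of @size bytes (rounded up to a
 * whole page) in the given domain; on success *@bo_ptr holds the new
 * BO.  Note that on ttm_bo_init() failure the qxl_ttm_bo_destroy()
 * callback has already freed the BO, so it is not freed again here.
 */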
int qxl_bo_create(struct qxl_device *qdev,
		  unsigned long size, bool kernel, bool pinned, u32 domain,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(&qdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->type = domain;
	bo->pin_count = pinned ? 1 : 0;
	bo->surface_id = 0;
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain, pinned);

	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, !kernel, size,
			NULL, NULL, &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->ddev.dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	*bo_ptr = bo;
	return 0;
}

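/*
 * Map the whole BO into the kernel address space.  The mapping is
 * cached in bo->kptr, so repeated calls are cheap; qxl_bo_kunmap()
 * releases it.  A minimal usage sketch (data and len are hypothetical
 * placeholders):
 *
 *	void *ptr;
 *	int r = qxl_bo_kmap(bo, &ptr);
 *
 *	if (r)
 *		return r;
 *	memcpy(ptr, data, len);
 *	qxl_bo_kunmap(bo);
 */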
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	return 0;
}

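/*
 * Map a single page of the BO: BOs in VRAM or surface memory get an
 * atomic write-combined mapping through the device's io_mapping;
 * anything else falls back to the cached full-BO mapping from
 * qxl_bo_kmap().  Atomic mappings disable preemption, so the caller
 * must not sleep before qxl_bo_kunmap_atomic_page().
 */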
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	void *rptr;
	int ret;
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);

	return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_kmap(bo, &rptr);
	if (ret)
		return NULL;

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

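/* Undo qxl_bo_kmap(); a no-op if the BO is not currently mapped. */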
void qxl_bo_kunmap(struct qxl_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

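/*
 * Release a mapping obtained from qxl_bo_kmap_atomic_page(); @pmap is
 * the pointer that call returned.  Non-io mappings are handed back to
 * qxl_bo_kunmap().
 */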
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];

	if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
	    (bo->tbo.mem.mem_type != TTM_PL_PRIV))
		goto fallback;

	io_mapping_unmap_atomic(pmap);

	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);
	return;
fallback:
	qxl_bo_kunmap(bo);
}

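/*
 * Drop the caller's reference and clear the pointer.  The BO is
 * destroyed via qxl_ttm_bo_destroy() once the last reference is gone.
 */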
void qxl_bo_unref(struct qxl_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_put_unlocked(&(*bo)->gem_base);
	*bo = NULL;
}

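/* Take an extra reference on the BO and return it for convenience. */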
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	drm_gem_object_get(&bo->gem_base);
	return bo;
}

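/*
 * Pin the BO by revalidating it with TTM_PL_FLAG_NO_EVICT set; the
 * caller must already hold the reservation.  Pins nest: each pin must
 * be balanced by an unpin.
 */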
static int __qxl_bo_pin(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->gem_base.dev;
	int r;

	if (bo->pin_count) {
		bo->pin_count++;
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, bo->type, true);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0))
		bo->pin_count = 1;
	else
		dev_err(ddev->dev, "%p pin failed\n", bo);
	return r;
}

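/*
 * Drop one pin reference; when the count hits zero, clear
 * TTM_PL_FLAG_NO_EVICT from all placements and revalidate so the BO
 * becomes evictable again.  The caller must hold the reservation.
 */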
static int __qxl_bo_unpin(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->gem_base.dev;
	int r, i;

	if (!bo->pin_count) {
		dev_warn(ddev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

/*
 * Reserve the BO before pinning the object.  If the BO is already
 * reserved, call the internal version __qxl_bo_pin() directly.
 */
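/*
 * A minimal usage sketch (hypothetical caller, error handling kept
 * short):
 *
 *	r = qxl_bo_pin(bo);
 *	if (r)
 *		return r;
 *	... the BO is now guaranteed not to move ...
 *	qxl_bo_unpin(bo);
 */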
int qxl_bo_pin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo, false);
	if (r)
		return r;

	r = __qxl_bo_pin(bo);
	qxl_bo_unreserve(bo);
	return r;
}

/*
 * Reserve the BO before unpinning the object.  If the BO is already
 * reserved, call the internal version __qxl_bo_unpin() directly.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo, false);
	if (r)
		return r;

	r = __qxl_bo_unpin(bo);
	qxl_bo_unreserve(bo);
	return r;
}

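/*
 * Driver teardown helper: forcibly free any GEM objects userspace
 * leaked.  Each leftover BO is reported, unlinked from the device
 * list and its reference dropped, which destroys the TTM BO.
 */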
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put_unlocked(&bo->gem_base);
	}
}

int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

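/*
 * Lazily assign a surface id to a surface-domain BO and create the
 * matching hardware surface, if it does not have one yet.
 */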
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo, NULL);
		if (ret)
			return ret;
	}
	return 0;
}

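/*
 * Evict every BO from an entire memory region: surface memory
 * (TTM_PL_PRIV) for qxl_surf_evict(), VRAM for qxl_vram_evict().
 */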
int qxl_surf_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
}

int qxl_vram_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
}