xref: /openbmc/linux/drivers/gpu/drm/radeon/radeon_object.c (revision baa7eb025ab14f3cba2e35c0a8648f9c9f01d24f)
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

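/*
 * TTM destroy callback, run when the last reference to the underlying
 * ttm_buffer_object is dropped: take the BO off the GEM object list,
 * release its surface register and free the radeon_bo wrapper.
 */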
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);
	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	kfree(bo);
}

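/*
 * A BO created by this file has radeon_ttm_bo_destroy as its destroy
 * callback, so checking that pointer tells us whether an arbitrary
 * ttm_buffer_object is wrapped in a radeon_bo.
 */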
bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

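/*
 * Translate a RADEON_GEM_DOMAIN_* mask into the TTM placement list used
 * for validation. VRAM is placed write-combined/uncached, while GTT and
 * system placements accept any caching; with no domain bit set we fall
 * back to system memory.
 */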
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0;

	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	if (domain & RADEON_GEM_DOMAIN_CPU)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;
}

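/*
 * Allocate and initialize a radeon_bo. Allocations at least as large as
 * min(visible VRAM, GTT) are rejected up front, and a failed VRAM
 * allocation is retried with GTT added to the domain. A typical call for
 * a kernel-internal buffer looks like (sketch):
 *
 *	r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true,
 *			     RADEON_GEM_DOMAIN_VRAM, &bo);
 */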
int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	unsigned long max_size = 0;
	int r;

	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	/* maximum bo size is the minimum between visible vram and gtt size */
	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
	if ((page_align << PAGE_SHIFT) >= max_size) {
		printk(KERN_WARNING "%s:%d alloc size %ldMB bigger than %ldMB limit\n",
			__func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
		return -ENOMEM;
	}

retry:
	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	bo->rdev = rdev;
	bo->gobj = gobj;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	mutex_lock(&rdev->vram_mutex);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, 0, !kernel, NULL, size,
			&radeon_ttm_bo_destroy);
	mutex_unlock(&rdev->vram_mutex);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS) {
			if (domain == RADEON_GEM_DOMAIN_VRAM) {
				domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			dev_err(rdev->dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		}
		return r;
	}
	*bo_ptr = bo;
	if (gobj) {
		mutex_lock(&bo->rdev->gem.mutex);
		list_add_tail(&bo->list, &rdev->gem.objects);
		mutex_unlock(&bo->rdev->gem.mutex);
	}
	return 0;
}

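/*
 * Map the whole BO into the kernel address space, caching the pointer in
 * bo->kptr so repeated calls are cheap. Callers pair this with
 * radeon_bo_kunmap() once they are done with the mapping.
 */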
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

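/*
 * Drop one reference via ttm_bo_unref(), serialized against
 * radeon_bo_create() by the vram_mutex; the caller's pointer is cleared
 * once the reference has been given up.
 */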
void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	mutex_lock(&rdev->vram_mutex);
	ttm_bo_unref(&tbo);
	mutex_unlock(&rdev->vram_mutex);
	if (tbo == NULL)
		*bo = NULL;
}

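/*
 * Pin a BO by validating it with TTM_PL_FLAG_NO_EVICT set. Pins are
 * refcounted, and pinning into VRAM is restricted to the CPU-visible
 * part of the aperture.
 */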
int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force to pin into visible video ram */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
	}
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	return r;
}

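/*
 * Drop one pin reference; only when the count reaches zero is the
 * NO_EVICT flag cleared and the BO revalidated as evictable.
 */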
int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;
	struct drm_gem_object *gobj;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		gobj = bo->gobj;
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			gobj, bo, (unsigned long)gobj->size,
			*((unsigned long *)&gobj->refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		radeon_bo_unref(&bo);
		gobj->driver_private = NULL;
		drm_gem_object_unreference(gobj);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

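/*
 * Set up a write-combining MTRR over the VRAM aperture and hand the rest
 * of the memory-manager initialization to TTM.
 */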
int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
			MTRR_TYPE_WRCOMB, 1);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
}

void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
				struct list_head *head)
{
	if (lobj->wdomain) {
		list_add(&lobj->list, head);
	} else {
		list_add_tail(&lobj->list, head);
	}
}

int radeon_bo_list_reserve(struct list_head *head)
{
	struct radeon_bo_list *lobj;
	int r;

	list_for_each_entry(lobj, head, list) {
		r = radeon_bo_reserve(lobj->bo, false);
		if (unlikely(r != 0))
			return r;
		lobj->reserved = true;
	}
	return 0;
}

void radeon_bo_list_unreserve(struct list_head *head)
{
	struct radeon_bo_list *lobj;

	list_for_each_entry(lobj, head, list) {
		/* only unreserve objects we successfully reserved */
		if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
			radeon_bo_unreserve(lobj->bo);
	}
}

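/*
 * Reserve every BO on the list, then validate each unpinned one into its
 * preferred domain (write domain wins over read domain), falling back
 * from VRAM to GTT on failure just like radeon_bo_create(). The GPU
 * offset and tiling flags are recorded for the command submission path.
 */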
int radeon_bo_list_validate(struct list_head *head)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	u32 domain;
	int r;

	list_for_each_entry(lobj, head, list) {
		lobj->reserved = false;
	}
	r = radeon_bo_list_reserve(head);
	if (unlikely(r != 0)) {
		return r;
	}
	list_for_each_entry(lobj, head, list) {
		bo = lobj->bo;
		if (!bo->pin_count) {
			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			r = ttm_bo_validate(&bo->tbo, &bo->placement,
						true, false, false);
			if (unlikely(r)) {
				if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
					domain |= RADEON_GEM_DOMAIN_GTT;
					goto retry;
				}
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

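/*
 * Attach @fence to every BO on the list, replacing (and unreferencing)
 * whatever sync object was there before. The per-BO spinlock keeps the
 * swap atomic with respect to TTM.
 */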
void radeon_bo_list_fence(struct list_head *head, void *fence)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	struct radeon_fence *old_fence = NULL;

	list_for_each_entry(lobj, head, list) {
		bo = lobj->bo;
		spin_lock(&bo->tbo.lock);
		old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
		bo->tbo.sync_obj = radeon_fence_ref(fence);
		bo->tbo.sync_obj_arg = NULL;
		spin_unlock(&bo->tbo.lock);
		if (old_fence) {
			radeon_fence_unref(&old_fence);
		}
	}
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

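/*
 * Find a surface register for a tiled BO. If none is free we steal one
 * from an unpinned BO, unmapping its CPU mapping so the next fault sees
 * the new state. Must be called with the BO reserved.
 */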
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	int r;

	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));
	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

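/*
 * Keep the surface register in sync with the BO's placement: drop it
 * when the BO is forced out or has moved out of VRAM, (re)acquire it
 * when a tiled BO has moved within VRAM.
 */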
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct radeon_bo *rbo;
	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
}

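/*
 * Called from the TTM fault path with the BO reserved. If the faulting
 * BO lives beyond the CPU-visible part of VRAM, revalidate it into the
 * visible window so the CPU mapping can succeed.
 */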
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		size = bo->mem.num_pages << PAGE_SHIFT;
		offset = bo->mem.start << PAGE_SHIFT;
		if ((offset + size) > rdev->mc.visible_vram_size) {
			/* hurrah the memory is not visible! */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
			r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
			if (unlikely(r != 0))
				return r;
			offset = bo->mem.start << PAGE_SHIFT;
			/* this should not happen */
			if ((offset + size) > rdev->mc.visible_vram_size)
				return -EINVAL;
		}
	}
	return 0;
}