/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"

struct radeon_object {
	struct ttm_buffer_object	tobj;
	struct list_head		list;
	struct radeon_device		*rdev;
	struct drm_gem_object		*gobj;
	struct ttm_bo_kmap_obj		kmap;
	unsigned			pin_count;
	uint64_t			gpu_addr;
	void				*kptr;
	bool				is_iomem;
	uint32_t			tiling_flags;
	uint32_t			pitch;
	int				surface_reg;
};

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * the functions below call it.
 */

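/*
 * A minimal usage sketch (not an actual caller in this file): reserve,
 * touch the BO, unreserve, propagating any reservation error.
 *
 *	r = radeon_object_reserve(robj, true);
 *	if (unlikely(r != 0))
 *		return r;
 *	... read or update robj->tobj state ...
 *	radeon_object_unreserve(robj);
 */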
static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
{
	return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
}

static void radeon_object_unreserve(struct radeon_object *robj)
{
	ttm_bo_unreserve(&robj->tobj);
}

static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
{
	struct radeon_object *robj;

	robj = container_of(tobj, struct radeon_object, tobj);
	list_del_init(&robj->list);
	radeon_object_clear_surface_reg(robj);
	kfree(robj);
}

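/*
 * Derive the GPU address of a BO from its current placement: the mm_node
 * start offset (in pages) shifted to bytes, plus the base of the aperture
 * the BO lives in (VRAM or GTT).  ~0ULL marks an invalid/unset address.
 */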
static inline void radeon_object_gpu_addr(struct radeon_object *robj)
{
	/* Default gpu address */
	robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
	if (robj->tobj.mem.mm_node == NULL) {
		return;
	}
	robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
	switch (robj->tobj.mem.mem_type) {
	case TTM_PL_VRAM:
		robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
		break;
	case TTM_PL_TT:
		robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
		robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
		return;
	}
}

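/*
 * Translate a RADEON_GEM_DOMAIN_* mask into TTM placement flags: VRAM is
 * mapped write-combined/uncached, while GTT and CPU (system) memory accept
 * any caching attribute.  With no domain set, fall back to cached system
 * memory as the safe default.
 */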
static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
{
	uint32_t flags = 0;
	if (domain & RADEON_GEM_DOMAIN_VRAM) {
		flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
	}
	if (domain & RADEON_GEM_DOMAIN_GTT) {
		flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	}
	if (domain & RADEON_GEM_DOMAIN_CPU) {
		flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
	}
	if (!flags) {
		flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
	}
	return flags;
}

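/*
 * Allocate and initialize a radeon_object backed by a TTM buffer object.
 * @gobj may be NULL for kernel-internal buffers; objects with a GEM
 * handle are tracked on rdev->gem.objects.  If ttm_buffer_object_init()
 * fails, TTM invokes our destroy callback, so the object must not be
 * freed again here.
 */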
int radeon_object_create(struct radeon_device *rdev,
			 struct drm_gem_object *gobj,
			 unsigned long size,
			 bool kernel,
			 uint32_t domain,
			 bool interruptible,
			 struct radeon_object **robj_ptr)
{
	struct radeon_object *robj;
	enum ttm_bo_type type;
	uint32_t flags;
	int r;

	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else {
		type = ttm_bo_type_device;
	}
	*robj_ptr = NULL;
	robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
	if (robj == NULL) {
		return -ENOMEM;
	}
	robj->rdev = rdev;
	robj->gobj = gobj;
	robj->surface_reg = -1;
	INIT_LIST_HEAD(&robj->list);

	flags = radeon_object_flags_from_domain(domain);
	r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
				   0, 0, false, NULL, size,
				   &radeon_ttm_object_object_destroy);
	if (unlikely(r != 0)) {
		/* ttm calls radeon_ttm_object_object_destroy if an error happens */
		DRM_ERROR("Failed to allocate TTM object (%lu, 0x%08X, %u)\n",
			  size, flags, 0);
		return r;
	}
	*robj_ptr = robj;
	if (gobj) {
		list_add_tail(&robj->list, &rdev->gem.objects);
	}
	return 0;
}

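/*
 * Map the whole BO into kernel address space.  The mapping is cached in
 * robj->kptr and reused by later callers; robj->tobj.lock guards the
 * cached pointer.  Pair with radeon_object_kunmap().
 */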
int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
	int r;

	spin_lock(&robj->tobj.lock);
	if (robj->kptr) {
		if (ptr) {
			*ptr = robj->kptr;
		}
		spin_unlock(&robj->tobj.lock);
		return 0;
	}
	spin_unlock(&robj->tobj.lock);
	r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
	if (r) {
		return r;
	}
	spin_lock(&robj->tobj.lock);
	robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
	spin_unlock(&robj->tobj.lock);
	if (ptr) {
		*ptr = robj->kptr;
	}
	return 0;
}

void radeon_object_kunmap(struct radeon_object *robj)
{
	spin_lock(&robj->tobj.lock);
	if (robj->kptr == NULL) {
		spin_unlock(&robj->tobj.lock);
		return;
	}
	robj->kptr = NULL;
	spin_unlock(&robj->tobj.lock);
	ttm_bo_kunmap(&robj->kmap);
}

void radeon_object_unref(struct radeon_object **robj)
{
	struct ttm_buffer_object *tobj;

	if ((*robj) == NULL) {
		return;
	}
	tobj = &((*robj)->tobj);
	ttm_bo_unref(&tobj);
	if (tobj == NULL) {
		*robj = NULL;
	}
}

int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
{
	*offset = robj->tobj.addr_space_offset;
	return 0;
}

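/*
 * Pin a BO into @domain.  Pinning is reference counted: only the first
 * pin validates the buffer with TTM_PL_FLAG_NO_EVICT set, later calls
 * just bump pin_count and report the cached GPU address.
 * radeon_object_unpin() drops the count and clears NO_EVICT at zero.
 */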
int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
		      uint64_t *gpu_addr)
{
	uint32_t flags;
	uint32_t tmp;
	int r;

	flags = radeon_object_flags_from_domain(domain);
	spin_lock(&robj->tobj.lock);
	if (robj->pin_count) {
		robj->pin_count++;
		if (gpu_addr != NULL) {
			*gpu_addr = robj->gpu_addr;
		}
		spin_unlock(&robj->tobj.lock);
		return 0;
	}
	spin_unlock(&robj->tobj.lock);
	r = radeon_object_reserve(robj, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for pinning.\n");
		return r;
	}
	tmp = robj->tobj.mem.placement;
	ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
	robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
	r = ttm_buffer_object_validate(&robj->tobj,
				       robj->tobj.proposed_placement,
				       false, false);
	radeon_object_gpu_addr(robj);
	if (gpu_addr != NULL) {
		*gpu_addr = robj->gpu_addr;
	}
	robj->pin_count = 1;
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to pin object.\n");
	}
	radeon_object_unreserve(robj);
	return r;
}

void radeon_object_unpin(struct radeon_object *robj)
{
	uint32_t flags;
	int r;

	spin_lock(&robj->tobj.lock);
	if (!robj->pin_count) {
		spin_unlock(&robj->tobj.lock);
		printk(KERN_WARNING "Unpin not necessary for %p!\n", robj);
		return;
	}
	robj->pin_count--;
	if (robj->pin_count) {
		spin_unlock(&robj->tobj.lock);
		return;
	}
	spin_unlock(&robj->tobj.lock);
	r = radeon_object_reserve(robj, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for unpinning.\n");
		return;
	}
	flags = robj->tobj.mem.placement;
	robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_buffer_object_validate(&robj->tobj,
				       robj->tobj.proposed_placement,
				       false, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to unpin buffer.\n");
	}
	radeon_object_unreserve(robj);
}

int radeon_object_wait(struct radeon_object *robj)
{
	int r = 0;

	/* FIXME: should use block reservation instead */
	r = radeon_object_reserve(robj, true);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for waiting.\n");
		return r;
	}
	spin_lock(&robj->tobj.lock);
	if (robj->tobj.sync_obj) {
		r = ttm_bo_wait(&robj->tobj, true, true, false);
	}
	spin_unlock(&robj->tobj.lock);
	radeon_object_unreserve(robj);
	return r;
}

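/*
 * Non-blocking variant of radeon_object_wait(): report the BO's current
 * placement and, since ttm_bo_wait() is called with no_wait set, return
 * without sleeping when a fence is still outstanding.
 */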
int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement)
{
	int r = 0;

	r = radeon_object_reserve(robj, true);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object.\n");
		return r;
	}
	spin_lock(&robj->tobj.lock);
	*cur_placement = robj->tobj.mem.mem_type;
	if (robj->tobj.sync_obj) {
		r = ttm_bo_wait(&robj->tobj, true, true, true);
	}
	spin_unlock(&robj->tobj.lock);
	radeon_object_unreserve(robj);
	return r;
}

int radeon_object_evict_vram(struct radeon_device *rdev)
{
	if (rdev->flags & RADEON_IS_IGP) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_object_force_delete(struct radeon_device *rdev)
{
	struct radeon_object *robj, *n;
	struct drm_gem_object *gobj;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	DRM_ERROR("Userspace still has active objects!\n");
	list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		gobj = robj->gobj;
		DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
			  gobj, robj, (unsigned long)gobj->size,
			  *((unsigned long *)&gobj->refcount));
		list_del_init(&robj->list);
		radeon_object_unref(&robj);
		gobj->driver_private = NULL;
		drm_gem_object_unreference(gobj);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_object_init(struct radeon_device *rdev)
{
	return radeon_ttm_init(rdev);
}

void radeon_object_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
}

void radeon_object_list_add_object(struct radeon_object_list *lobj,
				   struct list_head *head)
{
	if (lobj->wdomain) {
		list_add(&lobj->list, head);
	} else {
		list_add_tail(&lobj->list, head);
	}
}

int radeon_object_list_reserve(struct list_head *head)
{
	struct radeon_object_list *lobj;
	struct list_head *i;
	int r;

	list_for_each(i, head) {
		lobj = list_entry(i, struct radeon_object_list, list);
		if (!lobj->robj->pin_count) {
			r = radeon_object_reserve(lobj->robj, true);
			if (unlikely(r != 0)) {
				DRM_ERROR("radeon: failed to reserve object.\n");
				return r;
			}
		}
	}
	return 0;
}

void radeon_object_list_unreserve(struct list_head *head)
{
	struct radeon_object_list *lobj;
	struct list_head *i;

	list_for_each(i, head) {
		lobj = list_entry(i, struct radeon_object_list, list);
		if (!lobj->robj->pin_count) {
			radeon_object_unreserve(lobj->robj);
		}
	}
}

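/*
 * Reserve every BO on @head, validate each unpinned one into its
 * requested domain (the write domain takes precedence over the read
 * domain) and attach @fence as the new sync object, dropping the
 * reference to the fence it replaces.  Writers sit at the head of the
 * list (see radeon_object_list_add_object()), so they are validated
 * first.
 */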
int radeon_object_list_validate(struct list_head *head, void *fence)
{
	struct radeon_object_list *lobj;
	struct radeon_object *robj;
	struct radeon_fence *old_fence = NULL;
	struct list_head *i;
	int r;

	r = radeon_object_list_reserve(head);
	if (unlikely(r != 0)) {
		radeon_object_list_unreserve(head);
		return r;
	}
	list_for_each(i, head) {
		lobj = list_entry(i, struct radeon_object_list, list);
		robj = lobj->robj;
		if (!robj->pin_count) {
			if (lobj->wdomain) {
				robj->tobj.proposed_placement =
					radeon_object_flags_from_domain(lobj->wdomain);
			} else {
				robj->tobj.proposed_placement =
					radeon_object_flags_from_domain(lobj->rdomain);
			}
			r = ttm_buffer_object_validate(&robj->tobj,
						       robj->tobj.proposed_placement,
						       true, false);
			if (unlikely(r)) {
				DRM_ERROR("radeon: failed to validate.\n");
				return r;
			}
			radeon_object_gpu_addr(robj);
		}
		lobj->gpu_offset = robj->gpu_addr;
		lobj->tiling_flags = robj->tiling_flags;
		if (fence) {
			old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
			robj->tobj.sync_obj = radeon_fence_ref(fence);
			robj->tobj.sync_obj_arg = NULL;
		}
		if (old_fence) {
			radeon_fence_unref(&old_fence);
		}
	}
	return 0;
}

void radeon_object_list_unvalidate(struct list_head *head)
{
	struct radeon_object_list *lobj;
	struct radeon_fence *old_fence = NULL;
	struct list_head *i;

	list_for_each(i, head) {
		lobj = list_entry(i, struct radeon_object_list, list);
		old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
		lobj->robj->tobj.sync_obj = NULL;
		if (old_fence) {
			radeon_fence_unref(&old_fence);
		}
	}
	radeon_object_list_unreserve(head);
}

void radeon_object_list_clean(struct list_head *head)
{
	radeon_object_list_unreserve(head);
}

int radeon_object_fbdev_mmap(struct radeon_object *robj,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &robj->tobj);
}

unsigned long radeon_object_size(struct radeon_object *robj)
{
	return robj->tobj.num_pages << PAGE_SHIFT;
}

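/*
 * Find a free surface register for a tiled BO, or steal one from an
 * unpinned BO once all RADEON_GEM_MAX_SURFACES slots are taken; the
 * victim's virtual mapping is torn down first.  Returns -ENOMEM when
 * every slot belongs to a pinned BO.
 */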
int radeon_object_get_surface_reg(struct radeon_object *robj)
{
	struct radeon_device *rdev = robj->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_object *old_object;
	int steal;
	int i;

	if (!robj->tiling_flags)
		return 0;

	if (robj->surface_reg >= 0) {
		reg = &rdev->surface_regs[robj->surface_reg];
		i = robj->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
		reg = &rdev->surface_regs[i];
		if (!reg->robj)
			break;

		old_object = reg->robj;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->robj;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tobj);
		old_object->surface_reg = -1;
		i = steal;
	}

	robj->surface_reg = i;
	reg->robj = robj;

out:
	radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch,
			       robj->tobj.mem.mm_node->start << PAGE_SHIFT,
			       robj->tobj.num_pages << PAGE_SHIFT);
	return 0;
}

void radeon_object_clear_surface_reg(struct radeon_object *robj)
{
	struct radeon_device *rdev = robj->rdev;
	struct radeon_surface_reg *reg;

	if (robj->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[robj->surface_reg];
	radeon_clear_surface_reg(rdev, robj->surface_reg);

	reg->robj = NULL;
	robj->surface_reg = -1;
}

void radeon_object_set_tiling_flags(struct radeon_object *robj,
				    uint32_t tiling_flags, uint32_t pitch)
{
	robj->tiling_flags = tiling_flags;
	robj->pitch = pitch;
}

void radeon_object_get_tiling_flags(struct radeon_object *robj,
				    uint32_t *tiling_flags,
				    uint32_t *pitch)
{
	if (tiling_flags)
		*tiling_flags = robj->tiling_flags;
	if (pitch)
		*pitch = robj->pitch;
}

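/*
 * Keep a BO's surface register in sync with its placement: drop the
 * register on @force_drop or when the BO has moved out of VRAM, and
 * (re)acquire one when a surface-tiled BO sits in VRAM without a register
 * or has just moved there.
 */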
int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
			       bool force_drop)
{
	if (!(robj->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_object_clear_surface_reg(robj);
		return 0;
	}

	if (robj->tobj.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (robj->surface_reg >= 0)
			radeon_object_clear_surface_reg(robj);
		return 0;
	}

	if ((robj->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_object_get_surface_reg(robj);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);

	radeon_object_check_tiling(robj, false, true);
}

void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);

	radeon_object_check_tiling(robj, false, false);
}