/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);

/*
 * IB.
 */
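/*
 * Typical caller flow (illustrative sketch, not taken from this file):
 * get an IB from the pool, fill ib->ptr[] with packets and set
 * ib->length_dw, then schedule it on the ring and release it:
 *
 *	r = radeon_ib_get(rdev, &ib);
 *	... fill ib->ptr[] and set ib->length_dw ...
 *	r = radeon_ib_schedule(rdev, ib);
 *	radeon_ib_free(rdev, &ib);
 */

/* Allocate an IB from the pool: create a fence for it, then either hand
 * out a free slot or wait on the oldest scheduled IB and recycle it.
 * On success *ib is ready to fill, on failure *ib stays NULL. */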
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_fence *fence;
	struct radeon_ib *nib;
	unsigned long i;
	int r = 0;

	*ib = NULL;
	r = radeon_fence_create(rdev, &fence);
	if (r) {
		DRM_ERROR("failed to create fence for new IB\n");
		return r;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
	if (i < RADEON_IB_POOL_SIZE) {
		set_bit(i, rdev->ib_pool.alloc_bm);
		rdev->ib_pool.ibs[i].length_dw = 0;
		*ib = &rdev->ib_pool.ibs[i];
		mutex_unlock(&rdev->ib_pool.mutex);
		goto out;
	}
	if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
		/* every IB is in use but none has been scheduled,
		 * nothing we can do here */
		mutex_unlock(&rdev->ib_pool.mutex);
		DRM_ERROR("all IBs allocated, none scheduled.\n");
		r = -EINVAL;
		goto out;
	}
	/* get the first ib on the scheduled list */
	nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
			 struct radeon_ib, list);
	if (nib->fence == NULL) {
		/* a scheduled IB must carry a fence, nothing we can do here */
		mutex_unlock(&rdev->ib_pool.mutex);
		DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
		r = -EINVAL;
		goto out;
	}
	mutex_unlock(&rdev->ib_pool.mutex);

	r = radeon_fence_wait(nib->fence, false);
	if (r) {
		DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
			  (unsigned long)nib->gpu_addr, nib->length_dw);
		DRM_ERROR("radeon: GPU lockup detected, failed to get an IB\n");
		goto out;
	}
	radeon_fence_unref(&nib->fence);

	nib->length_dw = 0;

	/* scheduled list is accessed here */
	mutex_lock(&rdev->ib_pool.mutex);
	list_del(&nib->list);
	INIT_LIST_HEAD(&nib->list);
	mutex_unlock(&rdev->ib_pool.mutex);

	*ib = nib;
out:
	if (r) {
		radeon_fence_unref(&fence);
	} else {
		(*ib)->fence = fence;
	}
	return r;
}

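/* Return an IB to the pool. If the IB is still scheduled and its fence
 * has not signaled, it is left on the scheduled list to be recycled
 * later; otherwise its pool slot is released. *ib is cleared either way. */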
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
		/* IB is scheduled and its fence has not signaled yet,
		 * leave it on the scheduled list */
		mutex_unlock(&rdev->ib_pool.mutex);
		return;
	}
	list_del(&tmp->list);
	INIT_LIST_HEAD(&tmp->list);
	if (tmp->fence)
		radeon_fence_unref(&tmp->fence);

	tmp->length_dw = 0;
	clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
	mutex_unlock(&rdev->ib_pool.mutex);
}

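/* Submit an IB on the CP ring: reserve ring space, ask the chip-specific
 * backend to execute the IB, emit its fence and put it on the scheduled
 * list so radeon_ib_get() can recycle it once the fence signals. */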
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int r = 0;

	if (!ib->length_dw || !rdev->cp.ready) {
		/* the IB is empty or the CP is not ready, nothing to schedule */
		DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib);
	radeon_fence_emit(rdev, ib->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_ring_unlock_commit(rdev);
	return 0;
}

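/* Create the IB pool: allocate one pinned, kmapped GTT buffer object and
 * carve it into RADEON_IB_POOL_SIZE IBs of 64KB each. */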
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	void *ptr;
	uint64_t gpu_addr;
	int i;
	int r = 0;

	if (rdev->ib_pool.robj)
		return 0;
	/* Allocate 1M object buffer */
	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
	r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
				true, RADEON_GEM_DOMAIN_GTT,
				&rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->ib_pool.robj);
		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
	radeon_bo_unreserve(rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
		return r;
	}
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		unsigned offset;

		offset = i * 64 * 1024;
		rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
		rdev->ib_pool.ibs[i].ptr = ptr + offset;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
	}
	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");
	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	return r;
}

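/* Tear down the IB pool: clear the allocation bitmap, then unmap, unpin
 * and free the pool buffer object. */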
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->ib_pool.ready) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
	if (rdev->ib_pool.robj) {
		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ib_pool.robj);
			radeon_bo_unpin(rdev->ib_pool.robj);
			radeon_bo_unreserve(rdev->ib_pool.robj);
		}
		radeon_bo_unref(&rdev->ib_pool.robj);
		rdev->ib_pool.robj = NULL;
	}
	mutex_unlock(&rdev->ib_pool.mutex);
}


/*
 * Ring.
 */
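/*
 * How the ring locking API is used by the submission path in this file
 * (illustrative sketch):
 *
 *	r = radeon_ring_lock(rdev, ndw);	reserve ndw dwords
 *	radeon_ring_write(rdev, dw);		repeated for each dword
 *	radeon_ring_unlock_commit(rdev);	pad, commit, unlock
 *
 * or radeon_ring_unlock_undo() to drop the reservation on error.
 */

/* Recompute the number of free dwords on the CP ring from the hardware
 * read pointer and the current write pointer. */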
void radeon_ring_free_size(struct radeon_device *rdev)
{
	if (rdev->family >= CHIP_R600)
		rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	else
		rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	/* This works because ring_size is a power of 2 */
	rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
	rdev->cp.ring_free_dw -= rdev->cp.wptr;
	rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
	if (!rdev->cp.ring_free_dw) {
		rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
	}
}

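/* Reserve ndw dwords on the ring and take the ring mutex. The request is
 * rounded up to the fetch alignment so unlock_commit can always pad; if
 * the ring is too full, wait for fences until enough space frees up. */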
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
	mutex_lock(&rdev->cp.mutex);
	while (ndw > (rdev->cp.ring_free_dw - 1)) {
		radeon_ring_free_size(rdev);
		if (ndw < rdev->cp.ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next(rdev);
		if (r) {
			mutex_unlock(&rdev->cp.mutex);
			return r;
		}
	}
	rdev->cp.count_dw = ndw;
	rdev->cp.wptr_old = rdev->cp.wptr;
	return 0;
}

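/* Pad the ring up to the fetch alignment with type-2 (filler) packets,
 * make the writes visible, point the CP at the new write pointer and
 * release the ring mutex. */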
void radeon_ring_unlock_commit(struct radeon_device *rdev)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (rdev->cp.align_mask + 1) -
		       (rdev->cp.wptr & rdev->cp.align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(rdev, 2 << 30);
	}
	DRM_MEMORYBARRIER();
	radeon_cp_commit(rdev);
	mutex_unlock(&rdev->cp.mutex);
}

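/* Drop everything written since radeon_ring_lock() by restoring the saved
 * write pointer, then release the ring mutex. */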
void radeon_ring_unlock_undo(struct radeon_device *rdev)
{
	rdev->cp.wptr = rdev->cp.wptr_old;
	mutex_unlock(&rdev->cp.mutex);
}

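/* Allocate, pin and kmap the CP ring buffer. ring_size is in bytes and
 * must be a power of two (ptr_mask and the free-size math rely on it). */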
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	int r;

	rdev->cp.ring_size = ring_size;
	/* Allocate ring buffer */
	if (rdev->cp.ring_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
					RADEON_GEM_DOMAIN_GTT,
					&rdev->cp.ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
					&rdev->cp.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->cp.ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->cp.ring_obj,
				       (void **)&rdev->cp.ring);
		radeon_bo_unreserve(rdev->cp.ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
	rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
	return 0;
}

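/* Unmap, unpin and free the CP ring buffer. */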
void radeon_ring_fini(struct radeon_device *rdev)
{
	int r;

	mutex_lock(&rdev->cp.mutex);
	if (rdev->cp.ring_obj) {
		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->cp.ring_obj);
			radeon_bo_unpin(rdev->cp.ring_obj);
			radeon_bo_unreserve(rdev->cp.ring_obj);
		}
		radeon_bo_unref(&rdev->cp.ring_obj);
		rdev->cp.ring = NULL;
		rdev->cp.ring_obj = NULL;
	}
	mutex_unlock(&rdev->cp.mutex);
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
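/* Dump one IB to debugfs: its index, fence pointer, size and contents. */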
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_ib *ib = node->info_ent->data;
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04lu\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
#endif

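/* Register one debugfs entry per IB in the pool (radeon_ib_0000, ...). */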
int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}