/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);

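/*
 * Fetch one dword of command-stream data at dword offset @idx. The
 * IB chunk is only kmapped two pages at a time: if @idx falls in one
 * of the two cached pages, return the value directly; otherwise ask
 * radeon_cs_update_pages() to map the needed page and report which
 * of the two cache slots it landed in.
 */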
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}

void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (rdev->cp.count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
#endif
	rdev->cp.ring[rdev->cp.wptr++] = v;
	rdev->cp.wptr &= rdev->cp.ptr_mask;
	rdev->cp.count_dw--;
	rdev->cp.ring_free_dw--;
}

void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
{
	struct radeon_ib *ib, *n;

	list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
		list_del(&ib->list);
		vfree(ib->ptr);
		kfree(ib);
	}
}

void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ib *bib;

	bib = kmalloc(sizeof(*bib), GFP_KERNEL);
	if (bib == NULL)
		return;
	bib->ptr = vmalloc(ib->length_dw * 4);
	if (bib->ptr == NULL) {
		kfree(bib);
		return;
	}
	memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
	bib->length_dw = ib->length_dw;
	mutex_lock(&rdev->ib_pool.mutex);
	list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
	mutex_unlock(&rdev->ib_pool.mutex);
}

/*
 * IB.
 */
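/*
 * A typical IB round trip through this pool looks like (sketch):
 *
 *	radeon_ib_get(rdev, &ib);       grab a free slot plus a new fence
 *	... fill ib->ptr[], set ib->length_dw ...
 *	radeon_ib_schedule(rdev, ib);   execute on the ring, emit the fence
 *	radeon_ib_free(rdev, &ib);      drop the caller's reference
 *
 * A scheduled IB is marked free immediately; the fence stored in it
 * keeps radeon_ib_get() from reusing the slot before the GPU is done.
 */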
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_fence *fence;
	struct radeon_ib *nib;
	int r = 0, i, c;

	*ib = NULL;
	r = radeon_fence_create(rdev, &fence);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
		i &= (RADEON_IB_POOL_SIZE - 1);
		if (rdev->ib_pool.ibs[i].free) {
			nib = &rdev->ib_pool.ibs[i];
			break;
		}
	}
	if (nib == NULL) {
		/* This should never happen: it means every IB in the
		 * pool is allocated and none has been scheduled yet.
		 * Return -EBUSY to userspace in the hope that a retry
		 * of the ioctl has better luck.
		 */
		dev_err(rdev->dev, "no free indirect buffer!\n");
		mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -EBUSY;
	}
	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	nib->free = false;
	if (nib->fence) {
		mutex_unlock(&rdev->ib_pool.mutex);
		r = radeon_fence_wait(nib->fence, false);
		if (r) {
			dev_err(rdev->dev, "error waiting on fence of IB(%u:0x%016lX:%u)\n",
				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
			mutex_lock(&rdev->ib_pool.mutex);
			nib->free = true;
			mutex_unlock(&rdev->ib_pool.mutex);
			radeon_fence_unref(&fence);
			return r;
		}
		mutex_lock(&rdev->ib_pool.mutex);
	}
	radeon_fence_unref(&nib->fence);
	nib->fence = fence;
	nib->length_dw = 0;
	mutex_unlock(&rdev->ib_pool.mutex);
	*ib = nib;
	return 0;
}

void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	if (!tmp->fence->emited)
		radeon_fence_unref(&tmp->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	tmp->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
}

int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int r = 0;

	if (!ib->length_dw || !rdev->cp.ready) {
		/* TODO: the IB is empty or the CP is not ready; report this properly */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough, including room for the fence emit */
	r = radeon_ring_lock(rdev, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib);
	radeon_fence_emit(rdev, ib->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	/* once scheduled, the IB is considered free and protected by its fence */
	ib->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_ring_unlock_commit(rdev);
	return 0;
}

int radeon_ib_pool_init(struct radeon_device *rdev)
{
	void *ptr;
	uint64_t gpu_addr;
	int i;
	int r = 0;

	if (rdev->ib_pool.robj)
		return 0;
	INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
	/* Allocate one buffer object for the whole pool:
	 * RADEON_IB_POOL_SIZE slots of 64KB each */
	r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
			     PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
			     &rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->ib_pool.robj);
		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
	radeon_bo_unreserve(rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
		return r;
	}
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		unsigned offset;

		offset = i * 64 * 1024;
		rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
		rdev->ib_pool.ibs[i].ptr = ptr + offset;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		rdev->ib_pool.ibs[i].free = true;
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");
	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	return r;
}

void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	int r;
	struct radeon_bo *robj;

	if (!rdev->ib_pool.ready) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	radeon_ib_bogus_cleanup(rdev);
	robj = rdev->ib_pool.robj;
	rdev->ib_pool.robj = NULL;
	mutex_unlock(&rdev->ib_pool.mutex);

	if (robj) {
		r = radeon_bo_reserve(robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(robj);
			radeon_bo_unpin(robj);
			radeon_bo_unreserve(robj);
		}
		radeon_bo_unref(&robj);
	}
}


/*
 * Ring.
 */
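/*
 * Free space is computed modulo the ring size. As a worked example,
 * assume a 4KB ring, i.e. 1024 dwords and ptr_mask == 1023: with
 * rptr == 100 and wptr == 200, free == (100 + 1024 - 200) & 1023
 * == 924 dwords. A masked result of 0 means rptr caught up with
 * wptr, i.e. the ring is empty, so free space is bumped to the full
 * ring size.
 */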
void radeon_ring_free_size(struct radeon_device *rdev)
{
	if (rdev->wb.enabled) {
		rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
	} else {
		if (rdev->family >= CHIP_R600)
			rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
		else
			rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	}
	/* This works because ring_size is a power of 2 */
	rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
	rdev->cp.ring_free_dw -= rdev->cp.wptr;
	rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
	if (!rdev->cp.ring_free_dw) {
		rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
	}
}

int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
	while (ndw > (rdev->cp.ring_free_dw - 1)) {
		radeon_ring_free_size(rdev);
		if (ndw < rdev->cp.ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next(rdev);
		if (r)
			return r;
	}
	rdev->cp.count_dw = ndw;
	rdev->cp.wptr_old = rdev->cp.wptr;
	return 0;
}

int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
{
	int r;

	mutex_lock(&rdev->cp.mutex);
	r = radeon_ring_alloc(rdev, ndw);
	if (r) {
		mutex_unlock(&rdev->cp.mutex);
		return r;
	}
	return 0;
}

void radeon_ring_commit(struct radeon_device *rdev)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match the CP fetch size; the filler dword 2 << 30
	 * encodes a PM4 type-2 packet (packet type lives in bits
	 * 31:30), which the CP treats as a no-op.
	 */
	count_dw_pad = (rdev->cp.align_mask + 1) -
		       (rdev->cp.wptr & rdev->cp.align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(rdev, 2 << 30);
	}
	DRM_MEMORYBARRIER();
	radeon_cp_commit(rdev);
}

void radeon_ring_unlock_commit(struct radeon_device *rdev)
{
	radeon_ring_commit(rdev);
	mutex_unlock(&rdev->cp.mutex);
}

void radeon_ring_unlock_undo(struct radeon_device *rdev)
{
	rdev->cp.wptr = rdev->cp.wptr_old;
	mutex_unlock(&rdev->cp.mutex);
}
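
/*
 * Ring writes are bracketed by lock/unlock, roughly (sketch):
 *
 *	r = radeon_ring_lock(rdev, ndw);    reserve ndw dwords
 *	if (r)
 *		return r;
 *	radeon_ring_write(rdev, ...);       at most ndw times
 *	radeon_ring_unlock_commit(rdev);    pad, barrier, update wptr
 *
 * If something fails mid-stream, radeon_ring_unlock_undo() restores
 * the saved wptr_old instead, discarding the partial packets.
 */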

int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	int r;

	rdev->cp.ring_size = ring_size;
	/* Allocate ring buffer */
	if (rdev->cp.ring_obj == NULL) {
		r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
					RADEON_GEM_DOMAIN_GTT,
					&rdev->cp.ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
					&rdev->cp.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->cp.ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->cp.ring_obj,
				       (void **)&rdev->cp.ring);
		radeon_bo_unreserve(rdev->cp.ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
	rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
	return 0;
}

void radeon_ring_fini(struct radeon_device *rdev)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&rdev->cp.mutex);
	ring_obj = rdev->cp.ring_obj;
	rdev->cp.ring = NULL;
	rdev->cp.ring_obj = NULL;
	mutex_unlock(&rdev->cp.mutex);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_ib *ib = node->info_ent->data;
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

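/*
 * Note that reading radeon_ib_bogus is destructive: each read pops
 * one recorded IB off the list, dumps it and frees it.
 */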
static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_device *rdev = node->info_ent->data;
	struct radeon_ib *ib;
	unsigned i;

	mutex_lock(&rdev->ib_pool.mutex);
	if (list_empty(&rdev->ib_pool.bogus_ib)) {
		mutex_unlock(&rdev->ib_pool.mutex);
		seq_printf(m, "no bogus IB recorded\n");
		return 0;
	}
	ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
	list_del_init(&ib->list);
	mutex_unlock(&rdev->ib_pool.mutex);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	vfree(ib->ptr);
	kfree(ib);
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];

static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
	{"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
};
#endif

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;
	int r;

	radeon_debugfs_ib_bogus_info_list[0].data = rdev;
	r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
	if (r)
		return r;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}