xref: /openbmc/linux/drivers/gpu/drm/radeon/radeon_ring.c (revision ce932d0c5589e9766e089c22c66890dfc48fbd94)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);
int radeon_debugfs_ring_init(struct radeon_device *rdev);

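/* Fetch one dword of the IB chunk at index idx. The chunk is kept mapped
 * two pages at a time; if the page holding idx is not resident,
 * radeon_cs_update_pages() maps it in. On failure the error is stored in
 * p->parser_error and 0 is returned.
 */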
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}

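/* Write one dword at the current write pointer and advance it, wrapping
 * around through ptr_mask.
 */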
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
#endif
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}

/*
 * IB.
 */
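/* Free the IB's fence and sub-allocation, but only once the IB has been
 * emitted and its fence has signaled. Returns true if it was freed.
 */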
bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	bool done = false;

	/* only free ibs which have been emitted */
	if (ib->fence && ib->fence->emitted) {
		if (radeon_fence_signaled(ib->fence)) {
			radeon_fence_unref(&ib->fence);
			radeon_sa_bo_free(rdev, &ib->sa_bo);
			done = true;
		}
	}
	return done;
}

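/* Allocate an IB of at least size bytes (rounded up to 256) from the pool,
 * starting the search at the oldest slot and reusing the first one whose
 * fence has signaled. If every slot is still busy, wait on the oldest
 * emitted fence and retry, giving up after 5 attempts.
 */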
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib **ib, unsigned size)
{
	struct radeon_fence *fence;
	unsigned cretry = 0;
	int r = 0, i, idx;

	*ib = NULL;
	/* align size on 256 bytes */
	size = ALIGN(size, 256);

	r = radeon_fence_create(rdev, &fence, ring);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	idx = rdev->ib_pool.head_id;
retry:
	if (cretry > 5) {
		dev_err(rdev->dev, "failed to get an ib after 5 retries\n");
		radeon_mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -ENOMEM;
	}
	cretry++;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
		if (rdev->ib_pool.ibs[idx].fence == NULL) {
			r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
					     &rdev->ib_pool.ibs[idx].sa_bo,
					     size, 256);
			if (!r) {
				*ib = &rdev->ib_pool.ibs[idx];
				(*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
				(*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
				(*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
				(*ib)->gpu_addr += (*ib)->sa_bo.offset;
				(*ib)->fence = fence;
				(*ib)->vm_id = 0;
				(*ib)->is_const_ib = false;
				/* ibs are most likely to be allocated in a ring fashion,
				 * thus rdev->ib_pool.head_id should be the id of the
				 * oldest ib
				 */
				rdev->ib_pool.head_id = (1 + idx);
				rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
				radeon_mutex_unlock(&rdev->ib_pool.mutex);
				return 0;
			}
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	/* This should be a rare event, i.e. all ibs are scheduled but
	 * none has signaled yet.
	 */
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
			r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
			if (!r) {
				goto retry;
			}
			/* an error happened */
			break;
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
	radeon_fence_unref(&fence);
	return r;
}

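/* Return an IB to the pool. The buffer and fence are released right away
 * only if the IB was never emitted; emitted IBs are reclaimed later by
 * radeon_ib_try_free() once their fence signals.
 */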
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (tmp->fence && !tmp->fence->emitted) {
		radeon_sa_bo_free(rdev, &tmp->sa_bo);
		radeon_fence_unref(&tmp->fence);
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
}

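/* Commit an IB to its ring: reserve ring space, emit the IB followed by
 * its fence, then commit the new write pointer. A minimal sketch of the
 * expected flow (assuming the caller filled ib->ptr and ib->length_dw):
 *
 *	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, size);
 *	... copy packets into ib->ptr, set ib->length_dw ...
 *	r = radeon_ib_schedule(rdev, ib);
 *	radeon_ib_free(rdev, &ib);
 */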
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: nothing is in the ib; we should report this */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough for the fence too */
	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
	radeon_fence_emit(rdev, ib->fence);
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}

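/* One-time setup of the IB pool: create the backing suballocator, reset
 * every pool slot and register the debugfs files.
 */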
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	struct radeon_sa_manager tmp;
	int i, r;

	r = radeon_sa_bo_manager_init(rdev, &tmp,
				      RADEON_IB_POOL_SIZE*64*1024,
				      RADEON_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		radeon_mutex_unlock(&rdev->ib_pool.mutex);
		radeon_sa_bo_manager_fini(rdev, &tmp);
		return 0;
	}

	rdev->ib_pool.sa_manager = tmp;
	INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		rdev->ib_pool.ibs[i].fence = NULL;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");

	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	if (radeon_debugfs_ring_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for rings!\n");
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
	return 0;
}

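/* Tear down the IB pool, freeing every slot's sub-allocation and fence. */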
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	unsigned i;

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
			radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
			radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
		}
		radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
		rdev->ib_pool.ready = false;
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
}

int radeon_ib_pool_start(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
}

int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
}

/*
 * Ring.
 */
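/* Map a ring pointer back to its index in rdev->ring[]. */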
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* r1xx-r5xx only has CP ring */
	if (rdev->family < CHIP_R600)
		return RADEON_RING_TYPE_GFX_INDEX;

	if (rdev->family >= CHIP_CAYMAN) {
		if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
			return CAYMAN_RING_TYPE_CP1_INDEX;
		else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
			return CAYMAN_RING_TYPE_CP2_INDEX;
	}
	return RADEON_RING_TYPE_GFX_INDEX;
}

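/* Re-read the read pointer (from the writeback page when enabled,
 * otherwise from the register) and recompute how many dwords are free
 * between rptr and wptr.
 */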
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		ring->ring_free_dw = ring->ring_size / 4;
	}
}

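/* Reserve ndw dwords of ring space (rounded up to the alignment mask),
 * waiting on the next fence whenever the ring is too full.
 */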
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

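/* Like radeon_ring_alloc() but takes the ring mutex; pair with
 * radeon_ring_unlock_commit() or radeon_ring_unlock_undo(). A minimal
 * (hypothetical) usage sketch:
 *
 *	r = radeon_ring_lock(rdev, ring, 2);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, header);
 *	radeon_ring_write(ring, payload);
 *	radeon_ring_unlock_commit(rdev, ring);
 */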
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(&ring->mutex);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		mutex_unlock(&ring->mutex);
		return r;
	}
	return 0;
}

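/* Pad the ring with NOPs up to the fetch-size boundary, then publish the
 * new write pointer to the hardware.
 */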
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (ring->align_mask + 1) -
		       (ring->wptr & ring->align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(ring, ring->nop);
	}
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	(void)RREG32(ring->wptr_reg);
}

void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	mutex_unlock(&ring->mutex);
}

void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
	mutex_unlock(&ring->mutex);
}

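/* Create, pin (in GTT) and map the ring buffer object, then set up the
 * pointer mask and free dword count.
 */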
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	ring->ptr_reg_shift = ptr_reg_shift;
	ring->ptr_reg_mask = ptr_reg_mask;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
					RADEON_GEM_DOMAIN_GTT,
					&ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
					&ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				       (void **)&ring->ring);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	return 0;
}

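/* Detach the ring buffer object under the mutex, then unmap, unpin and
 * free it.
 */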
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&ring->mutex);
	ring_obj = ring->ring_obj;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(&ring->mutex);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

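/* Dump the hardware ring registers, the driver's pointer copies and
 * every dword still pending between rptr and wptr.
 */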
static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
	seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
	seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = ring->rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};

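/* Dump one pool IB: its index, fence pointer, size and contents. */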
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_ib *ib = &rdev->ib_pool.ibs[*((unsigned*)node->info_ent->data)];
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE];
#endif

int radeon_debugfs_ring_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	if (rdev->family >= CHIP_CAYMAN)
		return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
						ARRAY_SIZE(radeon_debugfs_ring_info_list));
	else
		return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list, 1);
#else
	return 0;
#endif
}

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_idx[i] = i;
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &radeon_debugfs_ib_idx[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}