/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers. Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written. When the
 * pointers are equal, the ring is idle. When the host
 * writes commands to the ring buffer, it increments the
 * wptr. The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
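
/*
 * Because the ring size is always a power of two, pointer arithmetic on
 * the ring reduces to bitwise masking. A minimal sketch of the idea
 * (illustrative only; the names mirror the fields set up in
 * amdgpu_ring_init() below):
 *
 *	ptr_mask = (ring_size / 4) - 1;		ring size in dwords, power of two
 *	wptr = (wptr + 1) & ptr_mask;		advance with wrap-around
 *
 * The GPU keeps fetching while rptr != wptr and goes idle once it has
 * caught up.
 */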

/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
	/* Align requested size with padding so amdgpu_ring_commit() can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission
	 */
	if (WARN_ON_ONCE(ndw > ring->max_dw))
		return -ENOMEM;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: the number of NOP packets to insert
 *
 * This is the generic insert_nop function for rings except SDMA.
 */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, ring->nop);
}

/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	/* We pad to match fetch size */
	count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
	count %= ring->align_mask + 1;
	ring->funcs->insert_nop(ring, count);

	mb();
	amdgpu_ring_set_wptr(ring);
}

/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;
}

/**
 * amdgpu_ring_backup - Back up the content of a ring
 *
 * @ring: the ring we want to back up
 * @data: pointer used to return the saved command stream
 *
 * Saves all unprocessed commits from a ring, returns the number of dwords saved.
 */
unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
			    uint32_t **data)
{
	unsigned size, ptr, i;

	*data = NULL;

	if (ring->ring_obj == NULL)
		return 0;

	/* it doesn't make sense to save anything if all fences are signaled */
	if (!amdgpu_fence_count_emitted(ring))
		return 0;

	ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);

	size = ring->wptr + (ring->ring_size / 4);
	size -= ptr;
	size &= ring->ptr_mask;
	if (size == 0)
		return 0;

	/* and then save the content of the ring */
	*data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
	if (!*data)
		return 0;
	for (i = 0; i < size; ++i) {
		(*data)[i] = ring->ring[ptr++];
		ptr &= ring->ptr_mask;
	}

	return size;
}

/**
 * amdgpu_ring_restore - append saved commands to the ring again
 *
 * @ring: ring to append commands to
 * @size: number of dwords we want to write
 * @data: saved commands
 *
 * Allocates space on the ring and restores the previously saved commands.
 */
int amdgpu_ring_restore(struct amdgpu_ring *ring,
			unsigned size, uint32_t *data)
{
	int i, r;

	if (!size || !data)
		return 0;

	/* restore the saved ring content */
	r = amdgpu_ring_alloc(ring, size);
	if (r)
		return r;

	for (i = 0; i < size; ++i)
		amdgpu_ring_write(ring, data[i]);

	amdgpu_ring_commit(ring);
	kfree(data);
	return 0;
}
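
/*
 * The helpers above are used in a fixed pattern. A minimal sketch of a
 * command submission (illustrative only; real callers emit engine
 * specific packets rather than NOPs):
 *
 *	r = amdgpu_ring_alloc(ring, 16);	reserve 16 dwords
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, ring->nop);	emit packets
 *	...
 *	amdgpu_ring_commit(ring);		pad and publish the new wptr
 *
 * If packet construction fails midway, the caller invokes
 * amdgpu_ring_undo() instead of amdgpu_ring_commit() so the partially
 * written dwords are never made visible to the GPU. Note that
 * amdgpu_ring_restore() itself follows this same alloc/write/commit
 * sequence to replay the dwords saved by amdgpu_ring_backup().
 */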

/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @ring_size: size of the ring in bytes
 * @nop: nop packet for this ring
 * @align_mask: alignment mask for command submissions on this ring
 * @irq_src: interrupt source to use for the fence driver
 * @irq_type: interrupt type to use for the fence driver
 * @ring_type: type of the ring (GFX, COMPUTE, SDMA, UVD, VCE)
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned ring_size, u32 nop, u32 align_mask,
		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
		     enum amdgpu_ring_type ring_type)
{
	u32 rb_bufsz;
	int r;

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->idx = adev->num_rings++;
		adev->rings[ring->idx] = ring;
		r = amdgpu_fence_driver_init_ring(ring);
		if (r)
			return r;
	}

	r = amdgpu_wb_get(adev, &ring->rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_wb_get(adev, &ring->wptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_wb_get(adev, &ring->fence_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_wb_get(adev, &ring->next_rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
		return r;
	}
	ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
	ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
	spin_lock_init(&ring->fence_lock);
	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
	if (r) {
		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
		return r;
	}

	/* Align ring size */
	rb_bufsz = order_base_2(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	ring->ring_size = ring_size;
	ring->align_mask = align_mask;
	ring->nop = nop;
	ring->type = ring_type;

	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0,
				     NULL, NULL, &ring->ring_obj);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = amdgpu_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			amdgpu_bo_unreserve(ring->ring_obj);
			dev_err(adev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = amdgpu_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		amdgpu_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(adev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->max_dw = DIV_ROUND_UP(ring->ring_size / 4,
				    amdgpu_sched_hw_submission);

	if (amdgpu_debugfs_ring_init(adev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings!\n");
	}
	return 0;
}
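
/*
 * A worked example of the size rounding in amdgpu_ring_init() above
 * (illustrative numbers): for ring_size = 1024 * 1024,
 * order_base_2(1024 * 1024 / 8) = 17 and the aligned size is
 * (1 << 18) * 4 = 1 MiB again, so power-of-two requests pass through
 * unchanged. For a non-power-of-two request such as ring_size = 3000,
 * order_base_2(375) = 9 and the ring is rounded up to
 * (1 << 10) * 4 = 4096 bytes, i.e. 1024 dwords and ptr_mask = 1023.
 */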

/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
	int r;
	struct amdgpu_bo *ring_obj;

	ring_obj = ring->ring_obj;
	ring->ready = false;
	ring->ring = NULL;
	ring->ring_obj = NULL;

	amdgpu_wb_free(ring->adev, ring->fence_offs);
	amdgpu_wb_free(ring->adev, ring->rptr_offs);
	amdgpu_wb_free(ring->adev, ring->wptr_offs);
	amdgpu_wb_free(ring->adev, ring->next_rptr_offs);

	if (ring_obj) {
		r = amdgpu_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(ring_obj);
			amdgpu_bo_unpin(ring_obj);
			amdgpu_bo_unreserve(ring_obj);
		}
		amdgpu_bo_unref(&ring_obj);
	}
}

/**
 * amdgpu_ring_from_fence - get ring from fence
 *
 * @f: fence structure
 *
 * Extract the ring a fence belongs to. Handles both scheduler as
 * well as hardware fences.
 */
struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f)
{
	struct amdgpu_fence *a_fence;
	struct amd_sched_fence *s_fence;

	s_fence = to_amd_sched_fence(f);
	if (s_fence)
		return container_of(s_fence->sched, struct amdgpu_ring, sched);

	a_fence = to_amdgpu_fence(f);
	if (a_fence)
		return a_fence->ring;

	return NULL;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int roffset = *(int *)node->info_ent->data;
	struct amdgpu_ring *ring = (void *)(((uint8_t *)adev) + roffset);

	uint32_t rptr, wptr, rptr_next;
	unsigned i;

	wptr = amdgpu_ring_get_wptr(ring);
	seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr);

	rptr = amdgpu_ring_get_rptr(ring);
	rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr);

	seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr);

	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
		   ring->wptr, ring->wptr);

	if (!ring->ready)
		return 0;

	/* print 32 dwords before the current rptr as often it's the last
	 * executed packet that is the root issue
	 */
	i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
	while (i != rptr) {
		seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
		if (i == rptr)
			seq_puts(m, " *");
		if (i == rptr_next)
			seq_puts(m, " #");
		seq_puts(m, "\n");
		i = (i + 1) & ring->ptr_mask;
	}
	while (i != wptr) {
		seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
		if (i == rptr)
			seq_puts(m, " *");
		if (i == rptr_next)
			seq_puts(m, " #");
		seq_puts(m, "\n");
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

/* TODO: clean this up! */
static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma.instance[0].ring);
static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma.instance[1].ring);
static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);
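
/*
 * Each debugfs entry below stores the byte offset of "its" ring within
 * struct amdgpu_device, and amdgpu_debugfs_ring_info() recovers the
 * ring pointer from that offset. In effect (a sketch of what the
 * callback does):
 *
 *	int roffset = *(int *)node->info_ent->data;
 *	struct amdgpu_ring *ring =
 *		(struct amdgpu_ring *)((uint8_t *)adev + roffset);
 *
 * This is only safe because every ring is embedded directly in the
 * device structure, which is what the offsetof() initializers above
 * rely on.
 */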
= { 432 {"amdgpu_ring_gfx", amdgpu_debugfs_ring_info, 0, &amdgpu_gfx_index}, 433 {"amdgpu_ring_cp1", amdgpu_debugfs_ring_info, 0, &cayman_cp1_index}, 434 {"amdgpu_ring_cp2", amdgpu_debugfs_ring_info, 0, &cayman_cp2_index}, 435 {"amdgpu_ring_dma1", amdgpu_debugfs_ring_info, 0, &amdgpu_dma1_index}, 436 {"amdgpu_ring_dma2", amdgpu_debugfs_ring_info, 0, &amdgpu_dma2_index}, 437 {"amdgpu_ring_uvd", amdgpu_debugfs_ring_info, 0, &r600_uvd_index}, 438 {"amdgpu_ring_vce1", amdgpu_debugfs_ring_info, 0, &si_vce1_index}, 439 {"amdgpu_ring_vce2", amdgpu_debugfs_ring_info, 0, &si_vce2_index}, 440 }; 441 442 #endif 443 444 static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring) 445 { 446 #if defined(CONFIG_DEBUG_FS) 447 unsigned i; 448 for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) { 449 struct drm_info_list *info = &amdgpu_debugfs_ring_info_list[i]; 450 int roffset = *(int*)amdgpu_debugfs_ring_info_list[i].data; 451 struct amdgpu_ring *other = (void *)(((uint8_t*)adev) + roffset); 452 unsigned r; 453 454 if (other != ring) 455 continue; 456 457 r = amdgpu_debugfs_add_files(adev, info, 1); 458 if (r) 459 return r; 460 } 461 #endif 462 return 0; 463 } 464