/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
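
/*
 * Illustrative note (not used by the code below): with the rptr/wptr
 * scheme described above, and assuming rptr and wptr are dword indices
 * into a power-of-two sized ring, the ring is idle when rptr == wptr
 * and the number of dwords still free for the host could be computed
 * roughly as
 *
 *	free_dw = (rptr - wptr - 1) & ring->ptr_mask;
 *
 * keeping one dword unused so that a completely full ring is not
 * mistaken for an empty one.
 */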

static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);

/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
	/* Align requested size with padding so amdgpu_ring_commit() can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission
	 */
	if (WARN_ON_ONCE(ndw > ring->max_dw))
		return -ENOMEM;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: the number of NOP packets to insert
 *
 * This is the generic insert_nop function for rings except SDMA
 */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, ring->nop);
}

/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: IB to add NOP packets to
 *
 * This is the generic pad_ib function for rings except SDMA
 */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & ring->align_mask)
		ib->ptr[ib->length_dw++] = ring->nop;
}

/**
 * amdgpu_ring_commit - tell the GPU to execute the new commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	/* We pad to match fetch size */
	count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
	count %= ring->align_mask + 1;
	ring->funcs->insert_nop(ring, count);

	mb();
	amdgpu_ring_set_wptr(ring);
}

/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;
}

/**
 * amdgpu_ring_backup - Back up the content of a ring
 *
 * @ring: the ring we want to back up
 * @data: pointer used to return the kmalloc()ed array with the saved commands
 *
 * Saves all unprocessed commits from a ring, returns the number of dwords saved.
 */
unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
			    uint32_t **data)
{
	unsigned size, ptr, i;

	*data = NULL;

	if (ring->ring_obj == NULL)
		return 0;

	/* it doesn't make sense to save anything if all fences are signaled */
	if (!amdgpu_fence_count_emitted(ring))
		return 0;

	ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);

	size = ring->wptr + (ring->ring_size / 4);
	size -= ptr;
	size &= ring->ptr_mask;
	if (size == 0)
		return 0;

	/* and then save the content of the ring */
	*data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
	if (!*data)
		return 0;
	for (i = 0; i < size; ++i) {
		(*data)[i] = ring->ring[ptr++];
		ptr &= ring->ptr_mask;
	}

	return size;
}
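
/*
 * Illustrative note: amdgpu_ring_backup() above and amdgpu_ring_restore()
 * below are intended to be used as a pair around a ring re-initialization,
 * roughly like:
 *
 *	unsigned size;
 *	uint32_t *data;
 *
 *	size = amdgpu_ring_backup(ring, &data);
 *	... reset and re-initialize the ring ...
 *	r = amdgpu_ring_restore(ring, size, data);
 *
 * On success amdgpu_ring_restore() kfree()s the saved buffer; if it fails,
 * the buffer is not freed.
 */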

/**
 * amdgpu_ring_restore - append saved commands to the ring again
 *
 * @ring: ring to append commands to
 * @size: number of dwords we want to write
 * @data: saved commands
 *
 * Allocates space on the ring and restores the previously saved commands.
 */
int amdgpu_ring_restore(struct amdgpu_ring *ring,
			unsigned size, uint32_t *data)
{
	int i, r;

	if (!size || !data)
		return 0;

	/* restore the saved ring content */
	r = amdgpu_ring_alloc(ring, size);
	if (r)
		return r;

	for (i = 0; i < size; ++i) {
		amdgpu_ring_write(ring, data[i]);
	}

	amdgpu_ring_commit(ring);
	kfree(data);
	return 0;
}

/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @ring_size: size of the ring
 * @nop: nop packet for this ring
 * @align_mask: alignment mask for command submissions on this ring
 * @irq_src: interrupt source used for the fence on this ring
 * @irq_type: interrupt type used for the fence on this ring
 * @ring_type: type of ring (GFX, compute, SDMA, UVD or VCE)
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned ring_size, u32 nop, u32 align_mask,
		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
		     enum amdgpu_ring_type ring_type)
{
	u32 rb_bufsz;
	int r;

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->idx = adev->num_rings++;
		adev->rings[ring->idx] = ring;
		r = amdgpu_fence_driver_init_ring(ring,
			amdgpu_sched_hw_submission);
		if (r)
			return r;
	}

	r = amdgpu_wb_get(adev, &ring->rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_wb_get(adev, &ring->wptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_wb_get(adev, &ring->fence_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
		return r;
	}

	r = amdgpu_wb_get(adev, &ring->next_rptr_offs);
	if (r) {
		dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
		return r;
	}
	ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
	ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
	spin_lock_init(&ring->fence_lock);
	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
	if (r) {
		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
		return r;
	}

	/* Align ring size */
	rb_bufsz = order_base_2(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	ring->ring_size = ring_size;
	ring->align_mask = align_mask;
	ring->nop = nop;
	ring->type = ring_type;

	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0,
				     NULL, NULL, &ring->ring_obj);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = amdgpu_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			amdgpu_bo_unreserve(ring->ring_obj);
			dev_err(adev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = amdgpu_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		amdgpu_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(adev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->max_dw = DIV_ROUND_UP(ring->ring_size / 4,
				    amdgpu_sched_hw_submission);

	if (amdgpu_debugfs_ring_init(adev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings !\n");
	}
	return 0;
}
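
/*
 * Illustrative note on the "Align ring size" step in amdgpu_ring_init()
 * above: the computation rounds the requested size to a power-of-two
 * number of bytes, e.g. a request of 100000 bytes gives
 * order_base_2(100000 / 8) = 14 and therefore (1 << 15) * 4 = 131072
 * bytes, which keeps ptr_mask a simple power-of-two mask over dword
 * indices.
 */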

/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
	int r;
	struct amdgpu_bo *ring_obj;

	ring_obj = ring->ring_obj;
	ring->ready = false;
	ring->ring = NULL;
	ring->ring_obj = NULL;

	amdgpu_wb_free(ring->adev, ring->fence_offs);
	amdgpu_wb_free(ring->adev, ring->rptr_offs);
	amdgpu_wb_free(ring->adev, ring->wptr_offs);
	amdgpu_wb_free(ring->adev, ring->next_rptr_offs);

	if (ring_obj) {
		r = amdgpu_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(ring_obj);
			amdgpu_bo_unpin(ring_obj);
			amdgpu_bo_unreserve(ring_obj);
		}
		amdgpu_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int roffset = *(int*)node->info_ent->data;
	struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset);

	uint32_t rptr, wptr, rptr_next;
	unsigned i;

	wptr = amdgpu_ring_get_wptr(ring);
	seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr);

	rptr = amdgpu_ring_get_rptr(ring);
	rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr);

	seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr);

	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
		   ring->wptr, ring->wptr);

	if (!ring->ready)
		return 0;

	/* print 32 dwords before the current rptr as often it's the last
	 * executed packet that is the root issue
	 */
	i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
	while (i != rptr) {
		seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
		if (i == rptr)
			seq_puts(m, " *");
		if (i == rptr_next)
			seq_puts(m, " #");
		seq_puts(m, "\n");
		i = (i + 1) & ring->ptr_mask;
	}
	while (i != wptr) {
		seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
		if (i == rptr)
			seq_puts(m, " *");
		if (i == rptr_next)
			seq_puts(m, " #");
		seq_puts(m, "\n");
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}
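
/*
 * Illustrative note: each debugfs entry below identifies its ring by the
 * byte offset of that ring's struct amdgpu_ring inside struct amdgpu_device.
 * amdgpu_debugfs_ring_info() converts the offset back into a pointer with
 *
 *	struct amdgpu_ring *ring = (void *)(((uint8_t *)adev) + roffset);
 *
 * and amdgpu_debugfs_ring_init() only registers the entry whose offset
 * matches the ring currently being initialized.
 */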

/* TODO: clean this up !*/
static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma.instance[0].ring);
static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma.instance[1].ring);
static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);

static struct drm_info_list amdgpu_debugfs_ring_info_list[] = {
	{"amdgpu_ring_gfx", amdgpu_debugfs_ring_info, 0, &amdgpu_gfx_index},
	{"amdgpu_ring_cp1", amdgpu_debugfs_ring_info, 0, &cayman_cp1_index},
	{"amdgpu_ring_cp2", amdgpu_debugfs_ring_info, 0, &cayman_cp2_index},
	{"amdgpu_ring_dma1", amdgpu_debugfs_ring_info, 0, &amdgpu_dma1_index},
	{"amdgpu_ring_dma2", amdgpu_debugfs_ring_info, 0, &amdgpu_dma2_index},
	{"amdgpu_ring_uvd", amdgpu_debugfs_ring_info, 0, &r600_uvd_index},
	{"amdgpu_ring_vce1", amdgpu_debugfs_ring_info, 0, &si_vce1_index},
	{"amdgpu_ring_vce2", amdgpu_debugfs_ring_info, 0, &si_vce2_index},
};

#endif

static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) {
		struct drm_info_list *info = &amdgpu_debugfs_ring_info_list[i];
		int roffset = *(int*)amdgpu_debugfs_ring_info_list[i].data;
		struct amdgpu_ring *other = (void *)(((uint8_t*)adev) + roffset);
		int r;

		if (other != ring)
			continue;

		r = amdgpu_debugfs_add_files(adev, info, 1);
		if (r)
			return r;
	}
#endif
	return 0;
}