/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @ring: ring the IB is associated with
 * @vm: optional VM the IB is associated with; if NULL the IB's GPU
 * address is taken directly from the suballocation
 * @size: requested IB size in bytes
 * @ib: IB object returned
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	if (size) {
		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
				     &ib->sa_bo, size, 256);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	amdgpu_sync_create(&ib->sync);

	ib->ring = ring;
	ib->vm = vm;

	return 0;
}

/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
{
	amdgpu_sync_free(adev, &ib->sync, &ib->fence->base);
	amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base);
	if (ib->fence)
		fence_put(&ib->fence->base);
}
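
/*
 * A minimal sketch of the usual IB lifecycle built from the helpers in
 * this file; "owner" and the packet contents are placeholders and error
 * handling is trimmed:
 *
 *	struct amdgpu_ib ib;
 *	int r;
 *
 *	r = amdgpu_ib_get(ring, NULL, 256, &ib);
 *	if (r)
 *		return r;
 *	... fill ib.ptr with packets and set ib.length_dw ...
 *	r = amdgpu_ib_schedule(adev, 1, &ib, owner);
 *	amdgpu_ib_free(adev, &ib);
 *
 * amdgpu_ib_free() can follow scheduling immediately: the sync object
 * and the suballocation are retired against ib.fence, so the memory is
 * not reused before the hardware is done with it.
 */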

/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @adev: amdgpu_device pointer
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @owner: owner for creating the fences
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
		       struct amdgpu_ib *ibs, void *owner)
{
	struct amdgpu_ib *ib = &ibs[0];
	struct amdgpu_ring *ring;
	struct amdgpu_ctx *ctx, *old_ctx;
	struct amdgpu_vm *vm;
	unsigned i;
	int r = 0;

	if (num_ibs == 0)
		return -EINVAL;

	ring = ibs->ring;
	ctx = ibs->ctx;
	vm = ibs->vm;

	if (!ring->ready) {
		dev_err(adev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}
	r = amdgpu_sync_wait(&ibs->sync);
	if (r) {
		dev_err(adev->dev, "IB sync failed (%d).\n", r);
		return r;
	}
	/* reserve worst-case ring space for the IBs, syncs and fence */
	r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	if (vm) {
		/* grab a vm id if necessary */
		r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync);
		if (r) {
			amdgpu_ring_unlock_undo(ring);
			return r;
		}
	}

	r = amdgpu_sync_rings(&ibs->sync, ring);
	if (r) {
		amdgpu_ring_unlock_undo(ring);
		dev_err(adev->dev, "failed to sync rings (%d)\n", r);
		return r;
	}

	if (vm) {
		/* do context switch */
		amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);

		if (ring->funcs->emit_gds_switch)
			amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
						    ib->gds_base, ib->gds_size,
						    ib->gws_base, ib->gws_size,
						    ib->oa_base, ib->oa_size);

		if (ring->funcs->emit_hdp_flush)
			amdgpu_ring_emit_hdp_flush(ring);
	}

	old_ctx = ring->current_ctx;
	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		/* all IBs of a submission must share the same ring, ctx and vm */
		if (ib->ring != ring || ib->ctx != ctx || ib->vm != vm) {
			ring->current_ctx = old_ctx;
			amdgpu_ring_unlock_undo(ring);
			return -EINVAL;
		}
		amdgpu_ring_emit_ib(ring, ib);
		ring->current_ctx = ctx;
	}

	r = amdgpu_fence_emit(ring, owner, &ib->fence);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		ring->current_ctx = old_ctx;
		amdgpu_ring_unlock_undo(ring);
		return r;
	}

	if (!amdgpu_enable_scheduler && ib->ctx)
		ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
						    &ib->fence->base);

	/* wrap the last IB with fence */
	if (ib->user) {
		uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
		addr += ib->user->offset;
		amdgpu_ring_emit_fence(ring, addr, ib->sequence,
				       AMDGPU_FENCE_FLAG_64BIT);
	}

	if (ib->vm)
		amdgpu_vm_fence(adev, ib->vm, &ib->fence->base);

	amdgpu_ring_unlock_commit(ring);
	return 0;
}
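
/*
 * For illustration, a sketch of the two-IB case described above (GFX on
 * SI); the packet contents are assumed, only the ordering is the point:
 *
 *	struct amdgpu_ib ibs[2];
 *
 *	... ibs[0] holds the CE IB (CONST_IB), ibs[1] the DE IB; both
 *	must use the same ring, ctx and vm ...
 *	r = amdgpu_ib_schedule(adev, 2, ibs, owner);
 *
 * amdgpu_ib_schedule() emits ibs[0] before ibs[1], so the CE can prime
 * the caches before the DE consumes the resource descriptors.
 */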

/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->ib_pool_ready) {
		return 0;
	}
	r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
				      AMDGPU_IB_POOL_SIZE*64*1024,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}

	r = amdgpu_sa_bo_manager_start(adev, &adev->ring_tmp_bo);
	if (r) {
		return r;
	}

	adev->ib_pool_ready = true;
	if (amdgpu_debugfs_sa_init(adev)) {
		dev_err(adev->dev, "failed to register debugfs file for SA\n");
	}
	return 0;
}

/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	if (adev->ib_pool_ready) {
		amdgpu_sa_bo_manager_suspend(adev, &adev->ring_tmp_bo);
		amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
		adev->ib_pool_ready = false;
	}
}

/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	unsigned i;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->ready)
			continue;

		r = amdgpu_ring_test_ib(ring);
		if (r) {
			ring->ready = false;

			if (ring == &adev->gfx.gfx_ring[0]) {
				/* oh, oh, that's really bad */
				DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
				adev->accel_working = false;
				return r;

			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}
	return 0;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);

	return 0;
}

static struct drm_info_list amdgpu_debugfs_sa_list[] = {
	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};

#endif

static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
#else
	return 0;
#endif
}
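
/*
 * A rough sketch of how the pool and test helpers above fit into the
 * device lifecycle; the exact call sites live elsewhere in the driver
 * and are assumed here, not quoted:
 *
 *	r = amdgpu_ib_pool_init(adev);		once, during device init
 *	... bring up the rings ...
 *	r = amdgpu_ib_ring_tests(adev);		smoke-test each ready ring
 *	...
 *	amdgpu_ib_pool_fini(adev);		during teardown
 */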