/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

#define AMDGPU_IB_TEST_TIMEOUT	msecs_to_jiffies(1000)

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: optional VM the IB belongs to
 * @size: requested IB size
 * @ib: IB object returned
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib)
{
	int r;

	if (size) {
		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
				     &ib->sa_bo, size, 256);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	return 0;
}

/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: the fence the IB's SA bo must wait on before its memory is reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f)
{
	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
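/*
 * Illustrative only, not part of the driver: a minimal sketch of the
 * IB lifecycle as the ring test code uses it (no job, no VM), with
 * packet contents and error handling elided:
 *
 *	struct amdgpu_ib ib;
 *	struct dma_fence *f = NULL;
 *	int r;
 *
 *	r = amdgpu_ib_get(adev, NULL, 256, &ib);   // allocate from the SA pool
 *	if (r)
 *		return r;
 *	ib.ptr[0] = ...;                           // fill in command packets
 *	ib.length_dw = ...;
 *	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
 *	amdgpu_ib_free(adev, &ib, f);              // SA bo reused once f signals
 *	dma_fence_put(f);
 */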
/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are scheduled on
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @last_vm_update: fence of the last VM update
 * @job: job the IBs belong to, or NULL for ring tests
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct dma_fence *last_vm_update,
		       struct amdgpu_job *job, struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib *ib = &ibs[0];
	bool skip_preamble, need_ctx_switch;
	unsigned patch_offset = ~0;
	struct amdgpu_vm *vm;
	uint64_t fence_ctx;
	uint32_t status = 0, alloc_size;
	unsigned i;
	int r = 0;

	if (num_ibs == 0)
		return -EINVAL;

	/* ring tests don't use a job */
	if (job) {
		vm = job->vm;
		fence_ctx = job->fence_ctx;
	} else {
		vm = NULL;
		fence_ctx = 0;
	}

	if (!ring->ready) {
		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n",
			ring->name);
		return -EINVAL;
	}

	if (vm && !job->vm_id) {
		dev_err(adev->dev, "VM IB without ID\n");
		return -EINVAL;
	}
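	/*
	 * Reserve worst-case ring space up front: the fixed per-frame
	 * overhead plus the per-IB emit size for every IB, so the emits
	 * below cannot run out of ring space partway through a frame.
	 */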
	alloc_size = ring->funcs->emit_frame_size + num_ibs *
		ring->funcs->emit_ib_size;

	r = amdgpu_ring_alloc(ring, alloc_size);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	if (ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	if (vm) {
		r = amdgpu_vm_flush(ring, job);
		if (r) {
			amdgpu_ring_undo(ring);
			return r;
		}
	}

	if (ring->funcs->emit_hdp_flush)
		amdgpu_ring_emit_hdp_flush(ring);

	/* always set cond_exec_polling to CONTINUE */
	*ring->cond_exe_cpu_addr = 1;

	skip_preamble = ring->current_ctx == fence_ctx;
	need_ctx_switch = ring->current_ctx != fence_ctx;
	if (job && ring->funcs->emit_cntxcntl) {
		if (need_ctx_switch)
			status |= AMDGPU_HAVE_CTX_SWITCH;
		status |= job->preamble_status;
		amdgpu_ring_emit_cntxcntl(ring, status);
	}

	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		/* drop preamble IBs if we don't have a context switch */
		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
		    skip_preamble &&
		    !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST))
			continue;

		amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
				    need_ctx_switch);
		need_ctx_switch = false;
	}

	if (ring->funcs->emit_hdp_invalidate)
		amdgpu_ring_emit_hdp_invalidate(ring);

	r = amdgpu_fence_emit(ring, f);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		if (job && job->vm_id)
			amdgpu_vm_reset_id(adev, job->vm_id);
		amdgpu_ring_undo(ring);
		return r;
	}

	/* wrap the last IB with fence */
	if (job && job->uf_addr) {
		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
				       AMDGPU_FENCE_FLAG_64BIT);
	}

	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	ring->current_ctx = fence_ctx;
	if (ring->funcs->emit_switch_buffer)
		amdgpu_ring_emit_switch_buffer(ring);
	amdgpu_ring_commit(ring);
	return 0;
}

/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->ib_pool_ready)
		return 0;

	r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
				      AMDGPU_IB_POOL_SIZE*64*1024,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		return r;

	r = amdgpu_sa_bo_manager_start(adev, &adev->ring_tmp_bo);
	if (r)
		return r;

	adev->ib_pool_ready = true;
	if (amdgpu_debugfs_sa_init(adev))
		dev_err(adev->dev, "failed to register debugfs file for SA\n");

	return 0;
}

/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	if (adev->ib_pool_ready) {
		amdgpu_sa_bo_manager_suspend(adev, &adev->ring_tmp_bo);
		amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
		adev->ib_pool_ready = false;
	}
}
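/*
 * A minimal sketch of the expected pairing, assuming the usual device
 * init/teardown path (illustrative, not part of the driver):
 *
 *	amdgpu_ib_pool_init(adev);   // once, before any amdgpu_ib_get()
 *	...                          // IB allocation and submission
 *	amdgpu_ib_pool_fini(adev);   // only after all IBs are freed
 */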
/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	unsigned i;
	int r, ret = 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->ready)
			continue;

		r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT);
		if (r) {
			ring->ready = false;

			if (ring == &adev->gfx.gfx_ring[0]) {
				/* oh, oh, that's really bad */
				DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
				adev->accel_working = false;
				return r;
			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
				ret = r;
			}
		}
	}
	return ret;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};

#endif

static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
#else
	return 0;
#endif
}
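/*
 * Illustrative usage note: with CONFIG_DEBUG_FS enabled, the
 * suballocator state registered above can be inspected from userspace
 * (the DRM minor number may differ), e.g.:
 *
 *	cat /sys/kernel/debug/dri/0/amdgpu_sa_info
 */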