/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_trace.h"

#define AMDGPU_IB_TEST_TIMEOUT	msecs_to_jiffies(1000)

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU-accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: optional VM the IB belongs to; when set, the GPU address is left
 * for the caller to fill in from the VM mapping
 * @size: requested IB size in bytes
 * @ib: IB object returned
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib)
{
	int r;

	if (size) {
		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
				     &ib->sa_bo, size, 256);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	return 0;
}

/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: fence the IB's suballocation must wait on before the memory is reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f)
{
	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
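
/*
 * Typical IB lifecycle, as used by the ring tests (a minimal sketch;
 * error handling is trimmed and the packet contents are placeholders):
 *
 *	struct amdgpu_ib ib;
 *	struct dma_fence *f = NULL;
 *
 *	memset(&ib, 0, sizeof(ib));
 *	r = amdgpu_ib_get(adev, NULL, 256, &ib);
 *	ib.ptr[0] = ...;			packets written via the CPU mapping
 *	ib.length_dw = ...;			number of valid dwords
 *	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 *	r = dma_fence_wait_timeout(f, false, timeout);
 *	amdgpu_ib_free(adev, &ib, f);		memory reused once f signals
 *	dma_fence_put(f);
 */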

/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are scheduled on
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @job: job the IBs belong to, or NULL for ring tests
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will already be in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib *ib = &ibs[0];
	struct dma_fence *tmp = NULL;
	bool skip_preamble, need_ctx_switch;
	unsigned patch_offset = ~0;
	struct amdgpu_vm *vm;
	uint64_t fence_ctx;
	uint32_t status = 0, alloc_size;
	unsigned fence_flags = 0;
	unsigned i;
	int r = 0;
	bool need_pipe_sync = false;

	if (num_ibs == 0)
		return -EINVAL;

	/* ring tests don't use a job */
	if (job) {
		vm = job->vm;
		fence_ctx = job->base.s_fence->scheduled.context;
	} else {
		vm = NULL;
		fence_ctx = 0;
	}

	if (!ring->sched.ready) {
		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
		return -EINVAL;
	}

	if (vm && !job->vmid) {
		dev_err(adev->dev, "VM IB without ID\n");
		return -EINVAL;
	}

	alloc_size = ring->funcs->emit_frame_size + num_ibs *
		ring->funcs->emit_ib_size;

	r = amdgpu_ring_alloc(ring, alloc_size);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	need_ctx_switch = ring->current_ctx != fence_ctx;
	if (ring->funcs->emit_pipeline_sync && job &&
	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
	     amdgpu_vm_need_pipeline_sync(ring, job))) {
		need_pipe_sync = true;

		if (tmp)
			trace_amdgpu_ib_pipe_sync(job, tmp);

		dma_fence_put(tmp);
	}

	if (ring->funcs->insert_start)
		ring->funcs->insert_start(ring);

	if (job) {
		r = amdgpu_vm_flush(ring, job, need_pipe_sync);
		if (r) {
			amdgpu_ring_undo(ring);
			return r;
		}
	}

	if (job && ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

#ifdef CONFIG_X86_64
	if (!(adev->flags & AMD_IS_APU))
#endif
	{
		if (ring->funcs->emit_hdp_flush)
			amdgpu_ring_emit_hdp_flush(ring);
		else
			amdgpu_asic_flush_hdp(adev, ring);
	}

	skip_preamble = ring->current_ctx == fence_ctx;
	if (job && ring->funcs->emit_cntxcntl) {
		if (need_ctx_switch)
			status |= AMDGPU_HAVE_CTX_SWITCH;
		status |= job->preamble_status;

		amdgpu_ring_emit_cntxcntl(ring, status);
	}

	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		/* drop preamble IBs if we don't have a context switch */
		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
		    skip_preamble &&
		    !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
		    !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, the preamble CE ib must be inserted anyway */
			continue;

		amdgpu_ring_emit_ib(ring, job, ib, need_ctx_switch);
		need_ctx_switch = false;
	}

	if (ring->funcs->emit_tmz)
		amdgpu_ring_emit_tmz(ring, false);
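
	/*
	 * Invalidate the HDP read cache so CPU reads of data the GPU has
	 * just written (e.g. the user fence) go out to memory.  As with the
	 * HDP flush above, the APU exception only applies on x86-64; every
	 * other architecture always takes this path.
	 */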
#ifdef CONFIG_X86_64
	if (!(adev->flags & AMD_IS_APU))
#endif
		amdgpu_asic_invalidate_hdp(adev, ring);

	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* wrap the last IB with fence */
	if (job && job->uf_addr) {
		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
	}

	r = amdgpu_fence_emit(ring, f, fence_flags);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		if (job && job->vmid)
			amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
		amdgpu_ring_undo(ring);
		return r;
	}

	if (ring->funcs->insert_end)
		ring->funcs->insert_end(ring);

	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	ring->current_ctx = fence_ctx;
	if (vm && ring->funcs->emit_switch_buffer)
		amdgpu_ring_emit_switch_buffer(ring);
	amdgpu_ring_commit(ring);
	return 0;
}

/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->ib_pool_ready)
		return 0;

	r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
				      AMDGPU_IB_POOL_SIZE * 64 * 1024,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		return r;

	adev->ib_pool_ready = true;
	if (amdgpu_debugfs_sa_init(adev))
		dev_err(adev->dev, "failed to register debugfs file for SA\n");

	return 0;
}
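
/*
 * Note on sizing: the pool above is AMDGPU_IB_POOL_SIZE * 64KB of GTT,
 * aligned to the GPU page size, and amdgpu_ib_get() carves IBs out of it
 * with 256-byte alignment, so the small per-submission IBs never need a
 * buffer object of their own.
 */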

/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	if (adev->ib_pool_ready) {
		amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
		adev->ib_pool_ready = false;
	}
}

/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	unsigned i;
	int r, ret = 0;
	long tmo_gfx, tmo_mm;

	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
	if (amdgpu_sriov_vf(adev)) {
		/* On the hypervisor side the MM engines are not scheduled
		 * together with the CP and SDMA engines, so even in exclusive
		 * mode an MM engine may still be running on another VF.  The
		 * IB test timeout for MM engines under SR-IOV therefore needs
		 * to be long; 8 sec should be enough for the MM engines to
		 * come back to this VF.
		 */
		tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	if (amdgpu_sriov_runtime(adev)) {
		/* The CP and SDMA engines are scheduled together, so under
		 * runtime mode the timeout needs to be wide enough to cover
		 * the time spent waiting for them to come back.
		 */
		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		long tmo;

		/* KIQ rings don't have an IB test because we never submit IBs
		 * to them and they have no interrupt support.
		 */
		if (!ring->sched.ready || !ring->funcs->test_ib)
			continue;

		/* MM engines need more time */
		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			tmo = tmo_mm;
		else
			tmo = tmo_gfx;

		r = amdgpu_ring_test_ib(ring, tmo);
		if (!r) {
			DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
				      ring->name);
			continue;
		}

		ring->sched.ready = false;
		DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
			      ring->name, r);

		if (ring == &adev->gfx.gfx_ring[0]) {
			/* oh, oh, that's really bad */
			adev->accel_working = false;
			return r;
		} else {
			ret = r;
		}
	}
	return ret;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};

#endif

static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
#else
	return 0;
#endif
}
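
/*
 * With CONFIG_DEBUG_FS enabled, the suballocator state registered above is
 * readable from userspace via the standard DRM debugfs path, e.g. (the
 * minor number varies per device):
 *
 *	cat /sys/kernel/debug/dri/0/amdgpu_sa_info
 */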