/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_trace.h"

#define AMDGPU_IB_TEST_TIMEOUT		msecs_to_jiffies(1000)
#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT	msecs_to_jiffies(2000)

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm pointer
 * @size: requested IB size
 * @pool_type: IB pool type (delayed, immediate, direct)
 * @ib: IB object returned
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, enum amdgpu_ib_pool_type pool_type,
		  struct amdgpu_ib *ib)
{
	int r;

	if (size) {
		r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
				     &ib->sa_bo, size);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
		/* flush the cache before committing the IB */
		ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;

		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	return 0;
}

/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: the fence the SA bo needs to wait on before the IB allocation is reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f)
{
	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
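/*
 * Illustrative lifecycle of a jobless IB submission, similar to what the
 * ring tests do (a sketch, not code from this driver: error handling is
 * trimmed, and ib.length_dw is assumed to be set by whatever fills in the
 * packets):
 *
 *	struct amdgpu_ib ib;
 *	struct dma_fence *f = NULL;
 *
 *	memset(&ib, 0, sizeof(ib));
 *	r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
 *	// ... write packets to ib.ptr and set ib.length_dw ...
 *	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 *	r = dma_fence_wait_timeout(f, false, timeout);
 *	amdgpu_ib_free(adev, &ib, f);
 *	dma_fence_put(f);
 */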
/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are associated with
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @job: job to schedule
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib *ib = &ibs[0];
	struct dma_fence *tmp = NULL;
	bool need_ctx_switch;
	unsigned patch_offset = ~0;
	struct amdgpu_vm *vm;
	uint64_t fence_ctx;
	uint32_t status = 0, alloc_size;
	unsigned fence_flags = 0;
	bool secure, init_shadow;
	u64 shadow_va, csa_va, gds_va;
	int vmid = AMDGPU_JOB_GET_VMID(job);
	unsigned i;
	int r = 0;
	bool need_pipe_sync = false;

	if (num_ibs == 0)
		return -EINVAL;

	/* ring tests don't use a job */
	if (job) {
		vm = job->vm;
		fence_ctx = job->base.s_fence ?
			job->base.s_fence->scheduled.context : 0;
		shadow_va = job->shadow_va;
		csa_va = job->csa_va;
		gds_va = job->gds_va;
		init_shadow = job->init_shadow;
	} else {
		vm = NULL;
		fence_ctx = 0;
		shadow_va = 0;
		csa_va = 0;
		gds_va = 0;
		init_shadow = false;
	}
	if (!ring->sched.ready && !ring->is_mes_queue) {
		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
		return -EINVAL;
	}

	if (vm && !job->vmid && !ring->is_mes_queue) {
		dev_err(adev->dev, "VM IB without ID\n");
		return -EINVAL;
	}

	if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
	    (!ring->funcs->secure_submission_supported)) {
		dev_err(adev->dev, "secure submissions not supported on ring <%s>\n", ring->name);
		return -EINVAL;
	}

	alloc_size = ring->funcs->emit_frame_size + num_ibs *
		ring->funcs->emit_ib_size;

	r = amdgpu_ring_alloc(ring, alloc_size);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	need_ctx_switch = ring->current_ctx != fence_ctx;
	if (ring->funcs->emit_pipeline_sync && job &&
	    ((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
	     amdgpu_vm_need_pipeline_sync(ring, job))) {
		need_pipe_sync = true;

		if (tmp)
			trace_amdgpu_ib_pipe_sync(job, tmp);

		dma_fence_put(tmp);
	}

	if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
		ring->funcs->emit_mem_sync(ring);

	if (ring->funcs->emit_wave_limit &&
	    ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
		ring->funcs->emit_wave_limit(ring, true);

	if (ring->funcs->insert_start)
		ring->funcs->insert_start(ring);

	if (job) {
		r = amdgpu_vm_flush(ring, job, need_pipe_sync);
		if (r) {
			amdgpu_ring_undo(ring);
			return r;
		}
	}
	amdgpu_ring_ib_begin(ring);

	if (ring->funcs->emit_gfx_shadow)
		amdgpu_ring_emit_gfx_shadow(ring, shadow_va, csa_va, gds_va,
					    init_shadow, vmid);

	if (ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	amdgpu_device_flush_hdp(adev, ring);

	if (need_ctx_switch)
		status |= AMDGPU_HAVE_CTX_SWITCH;

	if (job && ring->funcs->emit_cntxcntl) {
		status |= job->preamble_status;
		status |= job->preemption_status;
		amdgpu_ring_emit_cntxcntl(ring, status);
	}

	/* Setup initial TMZiness and send it off. */
	secure = false;
	if (job && ring->funcs->emit_frame_cntl) {
		secure = ib->flags & AMDGPU_IB_FLAGS_SECURE;
		amdgpu_ring_emit_frame_cntl(ring, true, secure);
	}

	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		if (job && ring->funcs->emit_frame_cntl) {
			if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
				amdgpu_ring_emit_frame_cntl(ring, false, secure);
				secure = !secure;
				amdgpu_ring_emit_frame_cntl(ring, true, secure);
			}
		}

		amdgpu_ring_emit_ib(ring, job, ib, status);
		status &= ~AMDGPU_HAVE_CTX_SWITCH;
	}

	if (job && ring->funcs->emit_frame_cntl)
		amdgpu_ring_emit_frame_cntl(ring, false, secure);

	amdgpu_device_invalidate_hdp(adev, ring);

	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* wrap the last IB with fence */
	if (job && job->uf_addr) {
		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
	}

	if (ring->funcs->emit_gfx_shadow) {
		amdgpu_ring_emit_gfx_shadow(ring, 0, 0, 0, false, 0);

		if (ring->funcs->init_cond_exec) {
			unsigned ce_offset = ~0;

			ce_offset = amdgpu_ring_init_cond_exec(ring);
			if (ce_offset != ~0 && ring->funcs->patch_cond_exec)
				amdgpu_ring_patch_cond_exec(ring, ce_offset);
		}
	}

	r = amdgpu_fence_emit(ring, f, job, fence_flags);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		if (job && job->vmid)
			amdgpu_vmid_reset(adev, ring->vm_hub, job->vmid);
		amdgpu_ring_undo(ring);
		return r;
	}

	if (ring->funcs->insert_end)
		ring->funcs->insert_end(ring);

	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	ring->current_ctx = fence_ctx;
	if (vm && ring->funcs->emit_switch_buffer)
		amdgpu_ring_emit_switch_buffer(ring);

	if (ring->funcs->emit_wave_limit &&
	    ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
		ring->funcs->emit_wave_limit(ring, false);

	amdgpu_ring_ib_end(ring);
	amdgpu_ring_commit(ring);
	return 0;
}
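/*
 * Descriptive summary of the submission frame amdgpu_ib_schedule() builds
 * above (which pieces are emitted depends on the ring's function table and
 * on whether a job is attached):
 *
 *	mem sync, wave limit raise, insert_start
 *	VM flush, including any required pipeline sync (job only)
 *	gfx shadow setup and conditional-execution preamble
 *	HDP flush, context control, frame control (TMZ on)
 *	the IBs, toggling frame control on secure/non-secure transitions
 *	HDP invalidate, optional user fence, scheduler fence
 *	insert_end, cond-exec patch, switch buffer, wave limit restore
 */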
/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	int r, i;

	if (adev->ib_pool_ready)
		return 0;

	for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
		r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
					      AMDGPU_IB_POOL_SIZE, 256,
					      AMDGPU_GEM_DOMAIN_GTT);
		if (r)
			goto error;
	}
	adev->ib_pool_ready = true;

	return 0;

error:
	while (i--)
		amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
	return r;
}

/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	int i;

	if (!adev->ib_pool_ready)
		return;

	for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
		amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
	adev->ib_pool_ready = false;
}
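/*
 * Illustrative pool selection (a sketch based on the names and usage visible
 * in this file, not a definitive mapping): roughly, AMDGPU_IB_POOL_DELAYED is
 * the usual choice for normal scheduler submissions, AMDGPU_IB_POOL_IMMEDIATE
 * for immediate page-table updates, and AMDGPU_IB_POOL_DIRECT for jobless
 * direct submissions such as the ring tests.  For example:
 *
 *	struct amdgpu_ib ib;
 *	int r = amdgpu_ib_get(adev, NULL, 64, AMDGPU_IB_POOL_DIRECT, &ib);
 */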
/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	long tmo_gfx, tmo_mm;
	int r, ret = 0;
	unsigned i;

	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
	if (amdgpu_sriov_vf(adev)) {
		/* On the hypervisor side the MM engines are not scheduled
		 * together with the CP and SDMA engines, so even in exclusive
		 * mode an MM engine may still be serving another VF.  The IB
		 * test timeout for MM engines under SR-IOV therefore needs to
		 * be long; 8 seconds should be enough for the MM engine to
		 * come back to this VF.
		 */
		tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	if (amdgpu_sriov_runtime(adev)) {
		/* The CP and SDMA engines are scheduled together, so the
		 * timeout needs to be wide enough to cover the time spent
		 * waiting for them to come back under RUNTIME only.
		 */
		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
	} else if (adev->gmc.xgmi.hive_id) {
		tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
	}

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		long tmo;

		/* KIQ rings don't have an IB test because we never submit IBs
		 * to them and they have no interrupt support.
		 */
		if (!ring->sched.ready || !ring->funcs->test_ib)
			continue;

		if (adev->enable_mes &&
		    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			continue;

		/* MM engines need more time */
		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			tmo = tmo_mm;
		else
			tmo = tmo_gfx;

		r = amdgpu_ring_test_ib(ring, tmo);
		if (!r) {
			DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
				      ring->name);
			continue;
		}

		ring->sched.ready = false;
		DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
			      ring->name, r);

		if (ring == &adev->gfx.gfx_ring[0]) {
			/* oh, oh, that's really bad */
			adev->accel_working = false;
			return r;

		} else {
			ret = r;
		}
	}
	return ret;
}
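/*
 * Sketch of a caller (hypothetical; the real call sites live in the device
 * init/resume paths):
 *
 *	r = amdgpu_ib_ring_tests(adev);
 *	if (r)
 *		dev_err(adev->dev, "IB ring tests failed (%d).\n", r);
 */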
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;

	seq_printf(m, "--------------------- DELAYED --------------------- \n");
	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
				     m);
	seq_printf(m, "-------------------- IMMEDIATE -------------------- \n");
	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
				     m);
	seq_printf(m, "--------------------- DIRECT ---------------------- \n");
	amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_sa_info);

#endif
void amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_sa_info", 0444, root, adev,
			    &amdgpu_debugfs_sa_info_fops);
#endif
}
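/*
 * The file created above can be read at runtime to inspect the per-pool
 * suballocator state, e.g. (a sketch; the path assumes the device is DRM
 * minor 0):
 *
 *	cat /sys/kernel/debug/dri/0/amdgpu_sa_info
 */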