/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

#define AMDGPU_IB_TEST_TIMEOUT	msecs_to_jiffies(1000)

/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored.  You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them.  Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

/**
 * amdgpu_ib_get - request an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @vm: the VM the IB belongs to, NULL for kernel IBs
 * @size: requested IB size in bytes
 * @ib: IB object returned
 *
 * Request an IB (all asics).  IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib)
{
	int r;

	if (size) {
		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
				     &ib->sa_bo, size, 256);
		if (r) {
			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
			return r;
		}

		ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);

		if (!vm)
			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
	}

	return 0;
}

/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: the fence the suballocation must wait on before its memory is reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f)
{
	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
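
/*
 * A minimal illustrative sketch, not wired up anywhere: the typical
 * kernel-internal lifecycle of an IB using the helpers in this file.
 * The helper name is hypothetical and the single NOP packet is only a
 * placeholder; real callers emit ASIC-specific packets (see the per-IP
 * ib_test implementations).  Prototypes come from amdgpu.h.
 */
static int __maybe_unused amdgpu_ib_example_roundtrip(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_ib ib;
	long timeout;
	int r;

	memset(&ib, 0, sizeof(ib));

	/* carve 256 bytes out of the suballocator pool */
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r)
		return r;

	/* a real caller writes command packets here; pad with the
	 * ring's NOP packet as a harmless stand-in */
	ib.ptr[0] = ring->funcs->nop;
	ib.length_dw = 1;

	/* put a pointer to the IB on the ring; no job for direct submits */
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err;

	/* block until the GPU has fetched and executed the IB */
	timeout = dma_fence_wait_timeout(f, false, AMDGPU_IB_TEST_TIMEOUT);
	if (timeout == 0)
		r = -ETIMEDOUT;
	else if (timeout < 0)
		r = timeout;
	else
		r = 0;

err:
	/* the suballocation is only reused once @f has signaled */
	amdgpu_ib_free(adev, &ib, f);
	dma_fence_put(f);
	return r;
}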

/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are scheduled on
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @job: job the IBs belong to, NULL for direct submissions such as ring tests
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine).  Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed.  To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE.  If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib *ib = &ibs[0];
	struct dma_fence *tmp = NULL;
	bool skip_preamble, need_ctx_switch;
	unsigned patch_offset = ~0;
	struct amdgpu_vm *vm;
	uint64_t fence_ctx;
	uint32_t status = 0, alloc_size;
	unsigned fence_flags = 0;

	unsigned i;
	int r = 0;
	bool need_pipe_sync = false;

	if (num_ibs == 0)
		return -EINVAL;

	/* ring tests don't use a job */
	if (job) {
		vm = job->vm;
		fence_ctx = job->base.s_fence->scheduled.context;
	} else {
		vm = NULL;
		fence_ctx = 0;
	}

	if (!ring->ready) {
		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
		return -EINVAL;
	}

	if (vm && !job->vmid) {
		dev_err(adev->dev, "VM IB without ID\n");
		return -EINVAL;
	}

	alloc_size = ring->funcs->emit_frame_size + num_ibs *
		ring->funcs->emit_ib_size;

	r = amdgpu_ring_alloc(ring, alloc_size);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	need_ctx_switch = ring->current_ctx != fence_ctx;
	if (ring->funcs->emit_pipeline_sync && job &&
	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
	     amdgpu_vm_need_pipeline_sync(ring, job))) {
		need_pipe_sync = true;
		dma_fence_put(tmp);
	}

	if (ring->funcs->insert_start)
		ring->funcs->insert_start(ring);

	if (job) {
		r = amdgpu_vm_flush(ring, job, need_pipe_sync);
		if (r) {
			amdgpu_ring_undo(ring);
			return r;
		}
	}

	if (job && ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

#ifdef CONFIG_X86_64
	if (!(adev->flags & AMD_IS_APU))
#endif
	{
		if (ring->funcs->emit_hdp_flush)
			amdgpu_ring_emit_hdp_flush(ring);
		else
			amdgpu_asic_flush_hdp(adev, ring);
	}

	skip_preamble = ring->current_ctx == fence_ctx;
	if (job && ring->funcs->emit_cntxcntl) {
		if (need_ctx_switch)
			status |= AMDGPU_HAVE_CTX_SWITCH;
		status |= job->preamble_status;

		amdgpu_ring_emit_cntxcntl(ring, status);
	}

	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		/* drop preamble IBs if we don't have a context switch */
		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
		    skip_preamble &&
		    !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
		    !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, preamble CE ib must be inserted anyway */
			continue;

		amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
				    need_ctx_switch);
		need_ctx_switch = false;
	}

	if (ring->funcs->emit_tmz)
		amdgpu_ring_emit_tmz(ring, false);

#ifdef CONFIG_X86_64
	if (!(adev->flags & AMD_IS_APU))
#endif
		amdgpu_asic_invalidate_hdp(adev, ring);

	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* wrap the last IB with fence */
	if (job && job->uf_addr) {
		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
				       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
	}

	r = amdgpu_fence_emit(ring, f, fence_flags);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		if (job && job->vmid)
			amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
		amdgpu_ring_undo(ring);
		return r;
	}

	if (ring->funcs->insert_end)
		ring->funcs->insert_end(ring);

	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	ring->current_ctx = fence_ctx;
	if (vm && ring->funcs->emit_switch_buffer)
		amdgpu_ring_emit_switch_buffer(ring);
	amdgpu_ring_commit(ring);
	return 0;
}
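
/*
 * A minimal illustrative helper, not wired up anywhere, restating the
 * worst-case ring space reservation made by amdgpu_ib_schedule() above:
 * every packet surrounding the IBs (pipeline sync, VM flush, HDP flush,
 * context control, fences, ...) is bounded by the ring's
 * emit_frame_size, plus one emit_ib_size slot per scheduled IB.  The
 * helper name is hypothetical.
 */
static u32 __maybe_unused amdgpu_ib_example_alloc_size(struct amdgpu_ring *ring,
						       unsigned num_ibs)
{
	return ring->funcs->emit_frame_size +
	       num_ibs * ring->funcs->emit_ib_size;
}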

/**
 * amdgpu_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->ib_pool_ready)
		return 0;

	r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
				      AMDGPU_IB_POOL_SIZE*64*1024,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		return r;

	adev->ib_pool_ready = true;
	if (amdgpu_debugfs_sa_init(adev))
		dev_err(adev->dev, "failed to register debugfs file for SA\n");

	return 0;
}

/**
 * amdgpu_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
	if (adev->ib_pool_ready) {
		amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
		adev->ib_pool_ready = false;
	}
}
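
/*
 * A minimal illustrative sketch, not wired up anywhere, of how the pool
 * helpers above pair up during device bring-up and teardown; the helper
 * name is hypothetical and the real callers live in the device
 * init/fini paths.
 */
static int __maybe_unused amdgpu_ib_example_pool_usage(struct amdgpu_device *adev)
{
	int r;

	/* idempotent: returns 0 immediately if the pool already exists */
	r = amdgpu_ib_pool_init(adev);
	if (r)
		return r;

	/* ... amdgpu_ib_get()/amdgpu_ib_free() may be used from here ... */

	/* safe to call unconditionally; a no-op if the pool is gone */
	amdgpu_ib_pool_fini(adev);
	return 0;
}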

/**
 * amdgpu_ib_ring_tests - test IBs on the rings
 *
 * @adev: amdgpu_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
	unsigned i;
	int r, ret = 0;
	long tmo_gfx, tmo_mm;

	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
	if (amdgpu_sriov_vf(adev)) {
		/* On the hypervisor side the MM engines are not scheduled
		 * together with the CP and SDMA engines, so even in exclusive
		 * mode an MM engine may still be running on another VF.  The
		 * IB test timeout for MM engines under SR-IOV therefore needs
		 * to be much longer; 8 seconds should be enough for the MM
		 * engines to come back to this VF.
		 */
		tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	if (amdgpu_sriov_runtime(adev)) {
		/* The CP and SDMA engines are scheduled together, so the
		 * timeout needs to be wide enough to cover the time spent
		 * waiting for them to come back under RUNTIME only.
		 */
		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
	}

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		long tmo;

		if (!ring || !ring->ready)
			continue;

		/* MM engines need more time */
		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		    ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			tmo = tmo_mm;
		else
			tmo = tmo_gfx;

		r = amdgpu_ring_test_ib(ring, tmo);
		if (r) {
			ring->ready = false;

			if (ring == &adev->gfx.gfx_ring[0]) {
				/* oh, oh, that's really bad */
				DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
				adev->accel_working = false;
				return r;
			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
				ret = r;
			}
		}
	}
	return ret;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
	{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};

#endif

static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
#else
	return 0;
#endif
}
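
/*
 * A minimal illustrative helper, not wired up anywhere, restating the
 * per-ring timeout selection from amdgpu_ib_ring_tests() above: the MM
 * (UVD/VCE/VCN) rings get the longer tmo_mm budget because under SR-IOV
 * they may still be busy serving another VF.  The helper name is
 * hypothetical.
 */
static long __maybe_unused amdgpu_ib_example_test_timeout(struct amdgpu_ring *ring,
							  long tmo_gfx, long tmo_mm)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_UVD:
	case AMDGPU_RING_TYPE_VCE:
	case AMDGPU_RING_TYPE_UVD_ENC:
	case AMDGPU_RING_TYPE_VCN_DEC:
	case AMDGPU_RING_TYPE_VCN_ENC:
	case AMDGPU_RING_TYPE_VCN_JPEG:
		return tmo_mm;
	default:
		return tmo_gfx;
	}
}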