/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"

static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_task_info ti;
	struct amdgpu_device *adev = ring->adev;
	int idx;
	int r;

	if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
		DRM_INFO("%s - device unplugged, skipping recovery on scheduler:%s",
			 __func__, s_job->sched->name);

		/* Effectively the job is aborted as the device is gone */
		return DRM_GPU_SCHED_STAT_ENODEV;
	}

	memset(&ti, 0, sizeof(struct amdgpu_task_info));
	adev->job_hang = true;

	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		DRM_ERROR("ring %s timeout, but soft recovered\n",
			  s_job->sched->name);
		goto exit;
	}

	amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
	DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		  ring->fence_drv.sync_seq);
	DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
		  ti.process_name, ti.tgid, ti.task_name, ti.pid);

	if (amdgpu_device_should_recover_gpu(ring->adev)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
		if (r)
			DRM_ERROR("GPU Recovery Failed: %d\n", r);
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		if (amdgpu_sriov_vf(adev))
			adev->virt.tdr_debug = true;
	}

exit:
	adev->job_hang = false;
	drm_dev_exit(idx);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}
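
/*
 * A hedged sketch of the caller side of this API (illustration only, not
 * code from this file): a one-IB job is allocated, its IB is filled with
 * command packets, and the job is either pushed to a scheduler entity with
 * amdgpu_job_submit() or run immediately with amdgpu_job_submit_direct().
 * "adev" and "ring" are assumed to be a valid device and ring, and the
 * 64-byte IB size is illustrative:
 *
 *	struct amdgpu_job *job;
 *	struct dma_fence *fence;
 *	int r;
 *
 *	r = amdgpu_job_alloc_with_ib(adev, 64, AMDGPU_IB_POOL_DIRECT, &job);
 *	if (r)
 *		return r;
 *
 *	... emit packets into job->ibs[0] here ...
 *
 *	r = amdgpu_job_submit_direct(job, ring, &fence);
 *	if (r)
 *		amdgpu_job_free(job);
 */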
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
	if (num_ibs == 0)
		return -EINVAL;

	*job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	/*
	 * Initialize the scheduler to at least some ring so that we always
	 * have a pointer to adev.
	 */
	(*job)->base.sched = &adev->rings[0]->sched;
	(*job)->vm = vm;

	amdgpu_sync_create(&(*job)->sync);
	amdgpu_sync_create(&(*job)->sched_sync);
	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	return 0;
}

int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     enum amdgpu_ib_pool_type pool_type,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, 1, job, NULL);
	if (r)
		return r;

	(*job)->num_ibs = 1;
	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
	if (r)
		kfree(*job);

	return r;
}

void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
			      struct amdgpu_bo *gws, struct amdgpu_bo *oa)
{
	if (gds) {
		job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	}
	if (gws) {
		job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	}
	if (oa) {
		job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}
}

void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	struct dma_fence *f;
	unsigned i;

	/* Use the scheduler fence if available */
	f = job->base.s_fence ? &job->base.s_fence->finished : &job->hw_fence;
	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	drm_sched_job_cleanup(s_job);

	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);

	/*
	 * The job is embedded in its HW fence, so drop the fence reference
	 * to release it; if the fence was never initialized, free the job
	 * directly instead.
	 */
	if (!job->hw_fence.ops)
		kfree(job);
	else
		dma_fence_put(&job->hw_fence);
}
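
/*
 * Gang submission ties several jobs on different rings together so that
 * they reach the hardware as one unit.  A hedged caller-side sketch (the
 * job variables are assumptions): every gang member, including the leader
 * itself, records the leader's scheduled fence:
 *
 *	amdgpu_job_set_gang_leader(member_job, leader_job);
 *	amdgpu_job_set_gang_leader(leader_job, leader_job);
 */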
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
				struct amdgpu_job *leader)
{
	struct dma_fence *fence = &leader->base.s_fence->scheduled;

	WARN_ON(job->gang_submit);

	/*
	 * Don't add a reference when we are the gang leader to avoid circular
	 * dependencies.
	 */
	if (job != leader)
		dma_fence_get(fence);
	job->gang_submit = fence;
}

void amdgpu_job_free(struct amdgpu_job *job)
{
	amdgpu_job_free_resources(job);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);
	if (job->gang_submit != &job->base.s_fence->scheduled)
		dma_fence_put(job->gang_submit);

	if (!job->hw_fence.ops)
		kfree(job);
	else
		dma_fence_put(&job->hw_fence);
}

int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **f)
{
	int r;

	if (!f)
		return -EINVAL;

	r = drm_sched_job_init(&job->base, entity, owner);
	if (r)
		return r;

	drm_sched_job_arm(&job->base);

	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	drm_sched_entity_push_job(&job->base);

	return 0;
}

int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence);
	if (r)
		return r;

	amdgpu_job_free(job);
	return 0;
}

static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
					       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	struct dma_fence *fence;
	int r;

	fence = amdgpu_sync_get_fence(&job->sync);
	if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
		r = amdgpu_sync_fence(&job->sched_sync, fence);
		if (r)
			DRM_ERROR("Error adding fence (%d)\n", r);
	}

	if (!fence && job->gang_submit)
		fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);

	while (fence == NULL && vm && !job->vmid) {
		r = amdgpu_vmid_grab(vm, ring, &job->sync,
				     &job->base.s_fence->finished,
				     job);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);

		fence = amdgpu_sync_get_fence(&job->sync);
	}

	return fence;
}
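
/*
 * The scheduler calls .dependency (amdgpu_job_dependency above) repeatedly
 * until it returns NULL, and only then hands the job to .run_job.  A hedged
 * sketch of how a dependency ends up in job->sync in the first place
 * ("fence" stands for any dma_fence the job must wait on):
 *
 *	r = amdgpu_sync_fence(&job->sync, fence);
 *	if (r)
 *		return r;
 */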
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r = 0;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);

	/* Skip job if VRAM is lost and never resubmit gangs */
	if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter) ||
	    (job->job_run_counter && job->gang_submit))
		dma_fence_set_error(finished, -ECANCELED);

	if (finished->error < 0) {
		DRM_INFO("Skip scheduling IBs!\n");
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
	}

	job->job_run_counter++;
	amdgpu_job_free_resources(job);

	fence = r ? ERR_PTR(r) : fence;
	return fence;
}

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *s_entity = NULL;
	int i;

	/* Signal all jobs not yet scheduled */
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list) {
			while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
				struct drm_sched_fence *s_fence = s_job->s_fence;

				dma_fence_signal(&s_fence->scheduled);
				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
				dma_fence_signal(&s_fence->finished);
			}
		}
		spin_unlock(&rq->lock);
	}

	/* Signal all jobs already scheduled to HW */
	list_for_each_entry(s_job, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
		dma_fence_signal(&s_fence->finished);
	}
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};
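
/*
 * amdgpu_sched_ops is handed to the DRM GPU scheduler when the driver sets
 * up one scheduler per ring.  A hedged sketch of the hookup; the real call
 * site lives elsewhere in the driver and passes additional submission
 * limits, timeout, and naming parameters, elided here:
 *
 *	r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, ...);
 */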