/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)				\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Initialize a context entity used by the scheduler
 * when submitting jobs to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: list of drm_gpu_schedulers on which jobs from this entity can
 *		be submitted
 * @num_sched_list: number of schedulers in @sched_list
 * @guilty: atomic_t set to 1 when a job on this entity's queue is found to be
 *	    guilty of causing a timeout
 *
 * Note: @sched_list must have at least one element for the entity to be
 * schedulable.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	entity->last_scheduled = NULL;

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
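
/*
 * Illustrative usage (a sketch, not part of this file): a driver would
 * typically embed an entity in its per-context state and initialize it
 * against one or more of its ring schedulers. The names "my_ctx" and
 * "my_ring_sched" below are hypothetical driver-side objects:
 *
 *	struct drm_gpu_scheduler *scheds[] = { &my_ring_sched };
 *	int r;
 *
 *	r = drm_sched_entity_init(&my_ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *				  scheds, ARRAY_SIZE(scheds), NULL);
 *	if (r)
 *		return r;
 *
 * Passing NULL for @guilty simply opts out of guilty-context tracking.
 */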

/**
 * drm_sched_entity_modify_sched - Modify the scheduler list of an entity
 *
 * @entity: scheduler entity to modify
 * @sched_list: list of new drm_gpu_schedulers which will replace the
 *		existing entity->sched_list
 * @num_sched_list: number of schedulers in @sched_list
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
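
/*
 * Illustrative usage (a sketch, not part of this file): a driver can narrow
 * an entity down to a single scheduler once it knows which ring a context is
 * bound to. "my_ctx" and "my_ring_sched" are hypothetical:
 *
 *	struct drm_gpu_scheduler *scheds[] = { &my_ring_sched };
 *
 *	drm_sched_entity_modify_sched(&my_ctx->entity, scheds,
 *				      ARRAY_SIZE(scheds));
 *
 * Note that only the pointer is stored, so the @sched_list array must
 * outlive the entity.
 */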

/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0)
		return true;

	return false;
}

/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity could provide a job.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time in jiffies to wait for the job queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions; this first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout.
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable enqueueing any more IBs right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
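
/*
 * Illustrative usage (a sketch, not part of this file): a driver teardown
 * path would typically flush first, giving queued jobs a bounded time to
 * drain, and only then tear the entity down. The helper name "my_ctx_fini"
 * and the struct "my_ctx" are hypothetical; drm_sched_entity_destroy()
 * below does the same with MAX_WAIT_SCHED_ENTITY_Q_EMPTY as the timeout:
 *
 *	static void my_ctx_fini(struct my_ctx *ctx)
 *	{
 *		drm_sched_entity_flush(&ctx->entity, msecs_to_jiffies(1000));
 *		drm_sched_entity_fini(&ctx->entity);
 *	}
 */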

/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipe is hung by an older entity, a new entity
		 * might not even have had a chance to submit its first job
		 * to the HW, so entity->last_scheduled will remain NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It goes over the
 * entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	/*
	 * Consumption of existing IBs wasn't completed. Forcefully remove
	 * them here.
	 */
	if (spsc_queue_count(&entity->job_queue)) {
		if (sched) {
			/*
			 * Wait for the thread to idle to make sure it isn't
			 * processing this entity.
			 */
			wait_for_completion(&entity->entity_idle);
		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini().
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);

/*
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of the runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
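
/*
 * Illustrative usage (a sketch, not part of this file): a driver could wire
 * this up to a context-priority ioctl. "my_ctx" and "boost" are hypothetical:
 *
 *	drm_sched_entity_set_priority(&my_ctx->entity,
 *				      boost ? DRM_SCHED_PRIORITY_NORMAL
 *					    : DRM_SCHED_PRIORITY_MIN);
 *
 * Only entity->priority is updated here; the new priority takes effect when
 * the entity is next placed on a runqueue in drm_sched_entity_select_rq().
 */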

/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's
 * dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job which
		 * belongs to the same entity; we can ignore fences from
		 * ourselves.
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled.
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

/**
 * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* Skip jobs from an entity that was marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}

/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || !entity->sched_list)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? &sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->rq_lock);

	if (entity->num_sched_list == 1)
		entity->sched_list = NULL;
}
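
/*
 * Background note (a sketch, not part of this file): drm_sched_entity_init()
 * allocates two consecutive fence contexts per entity; a job's scheduled
 * fence uses entity->fence_context and its finished fence uses
 * entity->fence_context + 1. That is why drm_sched_entity_add_dependency_cb()
 * above can detect self-dependencies with a check equivalent to:
 *
 *	static bool fence_is_from_entity(struct drm_sched_entity *entity,
 *					 struct dma_fence *fence)
 *	{
 *		return fence->context == entity->fence_context ||
 *		       fence->context == entity->fence_context + 1;
 *	}
 *
 * "fence_is_from_entity" is a hypothetical helper, shown for illustration
 * only.
 */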

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion into the queue matches the
 * job's fence sequence number, this function should be called together with
 * drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
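
/*
 * Illustrative usage (a sketch, not part of this file): a driver submission
 * path pairs drm_sched_job_init() with drm_sched_entity_push_job() under one
 * lock so that fence sequence numbers match queue order. "my_ctx", its
 * "submit_lock" and the driver job struct embedding "base" are hypothetical:
 *
 *	int r;
 *
 *	mutex_lock(&my_ctx->submit_lock);
 *	r = drm_sched_job_init(&job->base, &my_ctx->entity, my_ctx);
 *	if (r) {
 *		mutex_unlock(&my_ctx->submit_lock);
 *		return r;
 *	}
 *	drm_sched_entity_push_job(&job->base, &my_ctx->entity);
 *	mutex_unlock(&my_ctx->submit_lock);
 */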