1 /* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/kthread.h> 25 #include <linux/slab.h> 26 #include <linux/completion.h> 27 28 #include <drm/drm_print.h> 29 #include <drm/gpu_scheduler.h> 30 31 #include "gpu_scheduler_trace.h" 32 33 #define to_drm_sched_job(sched_job) \ 34 container_of((sched_job), struct drm_sched_job, queue_node) 35 36 /** 37 * drm_sched_entity_init - Init a context entity used by scheduler when 38 * submit to HW ring. 39 * 40 * @entity: scheduler entity to init 41 * @priority: priority of the entity 42 * @sched_list: the list of drm scheds on which jobs from this 43 * entity can be submitted 44 * @num_sched_list: number of drm sched in sched_list 45 * @guilty: atomic_t set to 1 when a job on this queue 46 * is found to be guilty causing a timeout 47 * 48 * Note that the &sched_list must have at least one element to schedule the entity. 49 * 50 * For changing @priority later on at runtime see 51 * drm_sched_entity_set_priority(). For changing the set of schedulers 52 * @sched_list at runtime see drm_sched_entity_modify_sched(). 53 * 54 * An entity is cleaned up by callind drm_sched_entity_fini(). See also 55 * drm_sched_entity_destroy(). 56 * 57 * Returns 0 on success or a negative error code on failure. 58 */ 59 int drm_sched_entity_init(struct drm_sched_entity *entity, 60 enum drm_sched_priority priority, 61 struct drm_gpu_scheduler **sched_list, 62 unsigned int num_sched_list, 63 atomic_t *guilty) 64 { 65 if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0]))) 66 return -EINVAL; 67 68 memset(entity, 0, sizeof(struct drm_sched_entity)); 69 INIT_LIST_HEAD(&entity->list); 70 entity->rq = NULL; 71 entity->guilty = guilty; 72 entity->num_sched_list = num_sched_list; 73 entity->priority = priority; 74 entity->sched_list = num_sched_list > 1 ? sched_list : NULL; 75 entity->last_scheduled = NULL; 76 RB_CLEAR_NODE(&entity->rb_tree_node); 77 78 if(num_sched_list) 79 entity->rq = &sched_list[0]->sched_rq[entity->priority]; 80 81 init_completion(&entity->entity_idle); 82 83 /* We start in an idle state. */ 84 complete(&entity->entity_idle); 85 86 spin_lock_init(&entity->rq_lock); 87 spsc_queue_init(&entity->job_queue); 88 89 atomic_set(&entity->fence_seq, 0); 90 entity->fence_context = dma_fence_context_alloc(2); 91 92 return 0; 93 } 94 EXPORT_SYMBOL(drm_sched_entity_init); 95 96 /** 97 * drm_sched_entity_modify_sched - Modify sched of an entity 98 * @entity: scheduler entity to init 99 * @sched_list: the list of new drm scheds which will replace 100 * existing entity->sched_list 101 * @num_sched_list: number of drm sched in sched_list 102 * 103 * Note that this must be called under the same common lock for @entity as 104 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to 105 * guarantee through some other means that this is never called while new jobs 106 * can be pushed to @entity. 107 */ 108 void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, 109 struct drm_gpu_scheduler **sched_list, 110 unsigned int num_sched_list) 111 { 112 WARN_ON(!num_sched_list || !sched_list); 113 114 entity->sched_list = sched_list; 115 entity->num_sched_list = num_sched_list; 116 } 117 EXPORT_SYMBOL(drm_sched_entity_modify_sched); 118 119 static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity) 120 { 121 rmb(); /* for list_empty to work without lock */ 122 123 if (list_empty(&entity->list) || 124 spsc_queue_count(&entity->job_queue) == 0 || 125 entity->stopped) 126 return true; 127 128 return false; 129 } 130 131 /* Return true if entity could provide a job. */ 132 bool drm_sched_entity_is_ready(struct drm_sched_entity *entity) 133 { 134 if (spsc_queue_peek(&entity->job_queue) == NULL) 135 return false; 136 137 if (READ_ONCE(entity->dependency)) 138 return false; 139 140 return true; 141 } 142 143 /** 144 * drm_sched_entity_flush - Flush a context entity 145 * 146 * @entity: scheduler entity 147 * @timeout: time to wait in for Q to become empty in jiffies. 148 * 149 * Splitting drm_sched_entity_fini() into two functions, The first one does the 150 * waiting, removes the entity from the runqueue and returns an error when the 151 * process was killed. 152 * 153 * Returns the remaining time in jiffies left from the input timeout 154 */ 155 long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) 156 { 157 struct drm_gpu_scheduler *sched; 158 struct task_struct *last_user; 159 long ret = timeout; 160 161 if (!entity->rq) 162 return 0; 163 164 sched = entity->rq->sched; 165 /** 166 * The client will not queue more IBs during this fini, consume existing 167 * queued IBs or discard them on SIGKILL 168 */ 169 if (current->flags & PF_EXITING) { 170 if (timeout) 171 ret = wait_event_timeout( 172 sched->job_scheduled, 173 drm_sched_entity_is_idle(entity), 174 timeout); 175 } else { 176 wait_event_killable(sched->job_scheduled, 177 drm_sched_entity_is_idle(entity)); 178 } 179 180 /* For killed process disable any more IBs enqueue right now */ 181 last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); 182 if ((!last_user || last_user == current->group_leader) && 183 (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) { 184 spin_lock(&entity->rq_lock); 185 entity->stopped = true; 186 drm_sched_rq_remove_entity(entity->rq, entity); 187 spin_unlock(&entity->rq_lock); 188 } 189 190 return ret; 191 } 192 EXPORT_SYMBOL(drm_sched_entity_flush); 193 194 static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk) 195 { 196 struct drm_sched_job *job = container_of(wrk, typeof(*job), work); 197 198 drm_sched_fence_finished(job->s_fence); 199 WARN_ON(job->s_fence->parent); 200 job->sched->ops->free_job(job); 201 } 202 203 204 /* Signal the scheduler finished fence when the entity in question is killed. */ 205 static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, 206 struct dma_fence_cb *cb) 207 { 208 struct drm_sched_job *job = container_of(cb, struct drm_sched_job, 209 finish_cb); 210 211 dma_fence_put(f); 212 INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work); 213 schedule_work(&job->work); 214 } 215 216 static struct dma_fence * 217 drm_sched_job_dependency(struct drm_sched_job *job, 218 struct drm_sched_entity *entity) 219 { 220 if (!xa_empty(&job->dependencies)) 221 return xa_erase(&job->dependencies, job->last_dependency++); 222 223 if (job->sched->ops->dependency) 224 return job->sched->ops->dependency(job, entity); 225 226 return NULL; 227 } 228 229 static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity) 230 { 231 struct drm_sched_job *job; 232 struct dma_fence *f; 233 int r; 234 235 while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) { 236 struct drm_sched_fence *s_fence = job->s_fence; 237 238 /* Wait for all dependencies to avoid data corruptions */ 239 while ((f = drm_sched_job_dependency(job, entity))) { 240 dma_fence_wait(f, false); 241 dma_fence_put(f); 242 } 243 244 drm_sched_fence_scheduled(s_fence); 245 dma_fence_set_error(&s_fence->finished, -ESRCH); 246 247 /* 248 * When pipe is hanged by older entity, new entity might 249 * not even have chance to submit it's first job to HW 250 * and so entity->last_scheduled will remain NULL 251 */ 252 if (!entity->last_scheduled) { 253 drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); 254 continue; 255 } 256 257 dma_fence_get(entity->last_scheduled); 258 r = dma_fence_add_callback(entity->last_scheduled, 259 &job->finish_cb, 260 drm_sched_entity_kill_jobs_cb); 261 if (r == -ENOENT) 262 drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); 263 else if (r) 264 DRM_ERROR("fence add callback failed (%d)\n", r); 265 } 266 } 267 268 /** 269 * drm_sched_entity_fini - Destroy a context entity 270 * 271 * @entity: scheduler entity 272 * 273 * Cleanups up @entity which has been initialized by drm_sched_entity_init(). 274 * 275 * If there are potentially job still in flight or getting newly queued 276 * drm_sched_entity_flush() must be called first. This function then goes over 277 * the entity and signals all jobs with an error code if the process was killed. 278 */ 279 void drm_sched_entity_fini(struct drm_sched_entity *entity) 280 { 281 struct drm_gpu_scheduler *sched = NULL; 282 283 if (entity->rq) { 284 sched = entity->rq->sched; 285 drm_sched_rq_remove_entity(entity->rq, entity); 286 } 287 288 /* Consumption of existing IBs wasn't completed. Forcefully 289 * remove them here. 290 */ 291 if (spsc_queue_count(&entity->job_queue)) { 292 if (sched) { 293 /* 294 * Wait for thread to idle to make sure it isn't processing 295 * this entity. 296 */ 297 wait_for_completion(&entity->entity_idle); 298 299 } 300 if (entity->dependency) { 301 dma_fence_remove_callback(entity->dependency, 302 &entity->cb); 303 dma_fence_put(entity->dependency); 304 entity->dependency = NULL; 305 } 306 307 drm_sched_entity_kill_jobs(entity); 308 } 309 310 dma_fence_put(entity->last_scheduled); 311 entity->last_scheduled = NULL; 312 } 313 EXPORT_SYMBOL(drm_sched_entity_fini); 314 315 /** 316 * drm_sched_entity_destroy - Destroy a context entity 317 * @entity: scheduler entity 318 * 319 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a 320 * convenience wrapper. 321 */ 322 void drm_sched_entity_destroy(struct drm_sched_entity *entity) 323 { 324 drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY); 325 drm_sched_entity_fini(entity); 326 } 327 EXPORT_SYMBOL(drm_sched_entity_destroy); 328 329 /* drm_sched_entity_clear_dep - callback to clear the entities dependency */ 330 static void drm_sched_entity_clear_dep(struct dma_fence *f, 331 struct dma_fence_cb *cb) 332 { 333 struct drm_sched_entity *entity = 334 container_of(cb, struct drm_sched_entity, cb); 335 336 entity->dependency = NULL; 337 dma_fence_put(f); 338 } 339 340 /* 341 * drm_sched_entity_clear_dep - callback to clear the entities dependency and 342 * wake up scheduler 343 */ 344 static void drm_sched_entity_wakeup(struct dma_fence *f, 345 struct dma_fence_cb *cb) 346 { 347 struct drm_sched_entity *entity = 348 container_of(cb, struct drm_sched_entity, cb); 349 350 drm_sched_entity_clear_dep(f, cb); 351 drm_sched_wakeup(entity->rq->sched); 352 } 353 354 /** 355 * drm_sched_entity_set_priority - Sets priority of the entity 356 * 357 * @entity: scheduler entity 358 * @priority: scheduler priority 359 * 360 * Update the priority of runqueus used for the entity. 361 */ 362 void drm_sched_entity_set_priority(struct drm_sched_entity *entity, 363 enum drm_sched_priority priority) 364 { 365 spin_lock(&entity->rq_lock); 366 entity->priority = priority; 367 spin_unlock(&entity->rq_lock); 368 } 369 EXPORT_SYMBOL(drm_sched_entity_set_priority); 370 371 /* 372 * Add a callback to the current dependency of the entity to wake up the 373 * scheduler when the entity becomes available. 374 */ 375 static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) 376 { 377 struct drm_gpu_scheduler *sched = entity->rq->sched; 378 struct dma_fence *fence = entity->dependency; 379 struct drm_sched_fence *s_fence; 380 381 if (fence->context == entity->fence_context || 382 fence->context == entity->fence_context + 1) { 383 /* 384 * Fence is a scheduled/finished fence from a job 385 * which belongs to the same entity, we can ignore 386 * fences from ourself 387 */ 388 dma_fence_put(entity->dependency); 389 return false; 390 } 391 392 s_fence = to_drm_sched_fence(fence); 393 if (s_fence && s_fence->sched == sched && 394 !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) { 395 396 /* 397 * Fence is from the same scheduler, only need to wait for 398 * it to be scheduled 399 */ 400 fence = dma_fence_get(&s_fence->scheduled); 401 dma_fence_put(entity->dependency); 402 entity->dependency = fence; 403 if (!dma_fence_add_callback(fence, &entity->cb, 404 drm_sched_entity_clear_dep)) 405 return true; 406 407 /* Ignore it when it is already scheduled */ 408 dma_fence_put(fence); 409 return false; 410 } 411 412 if (!dma_fence_add_callback(entity->dependency, &entity->cb, 413 drm_sched_entity_wakeup)) 414 return true; 415 416 dma_fence_put(entity->dependency); 417 return false; 418 } 419 420 struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) 421 { 422 struct drm_sched_job *sched_job; 423 424 sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue)); 425 if (!sched_job) 426 return NULL; 427 428 while ((entity->dependency = 429 drm_sched_job_dependency(sched_job, entity))) { 430 trace_drm_sched_job_wait_dep(sched_job, entity->dependency); 431 432 if (drm_sched_entity_add_dependency_cb(entity)) 433 return NULL; 434 } 435 436 /* skip jobs from entity that marked guilty */ 437 if (entity->guilty && atomic_read(entity->guilty)) 438 dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED); 439 440 dma_fence_put(entity->last_scheduled); 441 442 entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished); 443 444 /* 445 * If the queue is empty we allow drm_sched_entity_select_rq() to 446 * locklessly access ->last_scheduled. This only works if we set the 447 * pointer before we dequeue and if we a write barrier here. 448 */ 449 smp_wmb(); 450 451 spsc_queue_pop(&entity->job_queue); 452 453 /* 454 * Update the entity's location in the min heap according to 455 * the timestamp of the next job, if any. 456 */ 457 if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) { 458 struct drm_sched_job *next; 459 460 next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue)); 461 if (next) 462 drm_sched_rq_update_fifo(entity, next->submit_ts); 463 } 464 465 return sched_job; 466 } 467 468 void drm_sched_entity_select_rq(struct drm_sched_entity *entity) 469 { 470 struct dma_fence *fence; 471 struct drm_gpu_scheduler *sched; 472 struct drm_sched_rq *rq; 473 474 /* single possible engine and already selected */ 475 if (!entity->sched_list) 476 return; 477 478 /* queue non-empty, stay on the same engine */ 479 if (spsc_queue_count(&entity->job_queue)) 480 return; 481 482 /* 483 * Only when the queue is empty are we guaranteed that the scheduler 484 * thread cannot change ->last_scheduled. To enforce ordering we need 485 * a read barrier here. See drm_sched_entity_pop_job() for the other 486 * side. 487 */ 488 smp_rmb(); 489 490 fence = entity->last_scheduled; 491 492 /* stay on the same engine if the previous job hasn't finished */ 493 if (fence && !dma_fence_is_signaled(fence)) 494 return; 495 496 spin_lock(&entity->rq_lock); 497 sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list); 498 rq = sched ? &sched->sched_rq[entity->priority] : NULL; 499 if (rq != entity->rq) { 500 drm_sched_rq_remove_entity(entity->rq, entity); 501 entity->rq = rq; 502 } 503 spin_unlock(&entity->rq_lock); 504 505 if (entity->num_sched_list == 1) 506 entity->sched_list = NULL; 507 } 508 509 /** 510 * drm_sched_entity_push_job - Submit a job to the entity's job queue 511 * @sched_job: job to submit 512 * 513 * Note: To guarantee that the order of insertion to queue matches the job's 514 * fence sequence number this function should be called with drm_sched_job_arm() 515 * under common lock for the struct drm_sched_entity that was set up for 516 * @sched_job in drm_sched_job_init(). 517 * 518 * Returns 0 for success, negative error code otherwise. 519 */ 520 void drm_sched_entity_push_job(struct drm_sched_job *sched_job) 521 { 522 struct drm_sched_entity *entity = sched_job->entity; 523 bool first; 524 525 trace_drm_sched_job(sched_job, entity); 526 atomic_inc(entity->rq->sched->score); 527 WRITE_ONCE(entity->last_user, current->group_leader); 528 first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node); 529 sched_job->submit_ts = ktime_get(); 530 531 /* first job wakes up scheduler */ 532 if (first) { 533 /* Add the entity to the run queue */ 534 spin_lock(&entity->rq_lock); 535 if (entity->stopped) { 536 spin_unlock(&entity->rq_lock); 537 538 DRM_ERROR("Trying to push to a killed entity\n"); 539 return; 540 } 541 542 drm_sched_rq_add_entity(entity->rq, entity); 543 spin_unlock(&entity->rq_lock); 544 545 if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) 546 drm_sched_rq_update_fifo(entity, sched_job->submit_ts); 547 548 drm_sched_wakeup(entity->rq->sched); 549 } 550 } 551 EXPORT_SYMBOL(drm_sched_entity_push_job); 552