/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects the
 * entities from the run queue using a FIFO. The scheduler provides dependency
 * handling features among jobs. The driver is supposed to provide callback
 * functions for backend operations to the scheduler, like submitting a job to
 * the hardware run queue, returning the dependencies of a job, etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed.
 */

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/dma-resv.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_print.h>
#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

int drm_sched_policy = DRM_SCHED_POLICY_FIFO;

/**
 * DOC: sched_policy (int)
 * Used to override the default scheduling policy for entities in a run queue.
 */
MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
module_param_named(sched_policy, drm_sched_policy, int, 0444);

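/*
 * A minimal sketch of the driver-side submission flow this file implements
 * (my_submit() and the surrounding driver structures are hypothetical; the
 * drm_sched_* calls are the entry points defined below and in
 * sched_entity.c):
 *
 *	struct drm_sched_job *job = &my_job->base;
 *	int ret;
 *
 *	ret = drm_sched_job_init(job, &ctx->entity, my_ctx);
 *	if (ret)
 *		return ret;
 *
 *	// optional: record what this job has to wait for
 *	ret = drm_sched_job_add_implicit_dependencies(job, gem_obj, true);
 *	if (ret)
 *		goto err_cleanup;
 *
 *	drm_sched_job_arm(job);		// point of no return
 *	drm_sched_entity_push_job(job);	// hand the job to the scheduler
 */
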
static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
							    const struct rb_node *b)
{
	struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);
	struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);

	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
}

static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = entity->rq;

	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
		RB_CLEAR_NODE(&entity->rb_tree_node);
	}
}

void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
{
	/*
	 * Both locks need to be grabbed, one to protect against entity->rq
	 * changing from within a concurrent drm_sched_entity_select_rq() and
	 * the other to update the rb tree structure.
	 */
	spin_lock(&entity->rq_lock);
	spin_lock(&entity->rq->lock);

	drm_sched_rq_remove_fifo_locked(entity);

	entity->oldest_job_waiting = ts;

	rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
		      drm_sched_entity_compare_before);

	spin_unlock(&entity->rq->lock);
	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance to associate with this run queue
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->rb_tree_root = RB_ROOT_CACHED;
	rq->current_entity = NULL;
	rq->sched = sched;
}

/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_inc(rq->sched->score);
	list_add_tail(&entity->list, &rq->entities);

	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_dec(rq->sched->score);
	list_del_init(&entity->list);

	if (rq->current_entity == entity)
		rq->current_entity = NULL;

	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
		drm_sched_rq_remove_fifo_locked(entity);

	spin_unlock(&rq->lock);
}

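/*
 * A small illustration of the FIFO bookkeeping above: pushing a job re-keys
 * the entity in the rb tree with the job's submission timestamp, so
 * rb_first_cached() always yields the entity with the oldest waiting job.
 * Roughly (the push-side call lives in sched_entity.c):
 *
 *	// in drm_sched_entity_push_job(), approximately:
 *	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
 *		drm_sched_rq_update_fifo(entity, sched_job->submit_ts);
 *
 *	// in drm_sched_rq_select_entity_fifo() below:
 *	rb = rb_first_cached(&rq->rb_tree_root);	// oldest first
 */
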
/**
 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				reinit_completion(&entity->entity_idle);
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Find oldest waiting ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
{
	struct rb_node *rb;

	spin_lock(&rq->lock);
	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
		struct drm_sched_entity *entity;

		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			break;
		}
	}
	spin_unlock(&rq->lock);

	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
}

/**
 * drm_sched_job_done - complete a job
 * @s_job: pointer to the job which is done
 *
 * Finish the job's fence and wake up the worker thread.
 */
static void drm_sched_job_done(struct drm_sched_job *s_job)
{
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	atomic_dec(sched->score);

	trace_drm_sched_process_job(s_fence);

	dma_fence_get(&s_fence->finished);
	drm_sched_fence_finished(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_job_done_cb - the callback for a done job
 * @f: fence
 * @cb: fence callbacks
 */
static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);

	drm_sched_job_done(s_job);
}

/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->pending_list))
		queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
}

/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	if (sched->ready)
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);

/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
 * MAX_SCHEDULE_TIMEOUT in this case.
 *
 * Returns the timeout remaining
 *
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

	sched_timeout = sched->work_tdr.timer.expires;

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive.
	 */
	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);

/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining)
{
	spin_lock(&sched->job_list_lock);

	if (list_empty(&sched->pending_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);

	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);

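/*
 * A sketch of how the suspend/resume pair above is meant to be used by a
 * driver around a window in which jobs legitimately make no progress, for
 * example while an engine is being reset (my_engine_reset() is a
 * hypothetical driver function):
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(&ring->sched);
 *	my_engine_reset(ring);		// TDR must not fire meanwhile
 *	drm_sched_resume_timeout(&ring->sched, remaining);
 */
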
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->list, &sched->pending_list);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;
	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job) {
		/*
		 * Remove the bad job so it cannot be freed by a concurrent
		 * drm_sched_get_cleanup_job. It will be reinserted after
		 * sched->thread is parked, at which point it's safe.
		 */
		list_del_init(&job->list);
		spin_unlock(&sched->job_list_lock);

		status = job->sched->ops->timedout_job(job);

		/*
		 * Guilty job did complete and hence needs to be manually removed.
		 * See the drm_sched_stop doc.
		 */
		if (sched->free_guilty) {
			job->sched->ops->free_job(job);
			sched->free_guilty = false;
		}
	} else {
		spin_unlock(&sched->job_list_lock);
	}

	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}
}

/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: job which caused the time out
 *
 * Stop the scheduler; this also removes and frees all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it's
 * the caller's responsibility to release it manually if it's not part of
 * the pending list any more.
 *
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;

	kthread_park(sched->thread);

	/*
	 * Reinsert the bad job here - now it's safe as
	 * drm_sched_get_cleanup_job cannot race against us and release the
	 * bad job at this point - we parked (waited for) any in progress
	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
	 * now until the scheduler thread is unparked.
	 */
	if (bad && bad->sched == sched)
		/*
		 * Add at the head of the queue to reflect it was the earliest
		 * job extracted.
		 */
		list_add(&bad->list, &sched->pending_list);

	/*
	 * Iterate the job list from later to earlier ones and either
	 * deactivate their HW callbacks or remove them from the pending list
	 * if they have already signaled.
	 * This iteration is thread safe as the sched thread is stopped.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
					 list) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		} else {
			/*
			 * Remove the job from the pending_list.
			 * Locking here is for the concurrent resume timeout.
			 */
			spin_lock(&sched->job_list_lock);
			list_del_init(&s_job->list);
			spin_unlock(&sched->job_list_lock);

			/*
			 * Wait for the job's HW fence callback to finish using
			 * s_job before releasing it.
			 *
			 * The job is still alive, so the fence refcount is at
			 * least 1.
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep the bad job alive for later use during
			 * recovery by some of the drivers, but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop the pending timer in flight as we rearm it in drm_sched_start.
	 * This avoids the pending timeout work in progress firing right away
	 * after this TDR finished and before the newly restarted jobs had a
	 * chance to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}
EXPORT_SYMBOL(drm_sched_stop);

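/*
 * For context, a typical &drm_sched_backend_ops.timedout_job implementation
 * brackets its hardware reset with the stop/start helpers here (my_ring,
 * to_my_ring() and my_gpu_reset() are hypothetical driver pieces):
 *
 *	static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *bad)
 *	{
 *		struct my_ring *ring = to_my_ring(bad->sched);
 *
 *		drm_sched_stop(&ring->sched, bad);	// park thread, detach cbs
 *		my_gpu_reset(ring);			// driver-specific reset
 *		drm_sched_resubmit_jobs(&ring->sched);	// deprecated, see below
 *		drm_sched_start(&ring->sched, true);	// rearm and unpark
 *
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 */
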
/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 *
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	int r;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being inserted or removed. Also, concurrent GPU
	 * recoveries can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct dma_fence *fence = s_job->s_fence->parent;

		atomic_inc(&sched->hw_rq_count);

		if (!full_recovery)
			continue;

		if (fence) {
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(s_job);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					      r);
		} else
			drm_sched_job_done(s_job);
	}

	if (full_recovery) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}

	kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);

/**
 * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
 *
 * @sched: scheduler instance
 *
 * Re-submitting jobs was a concept AMD came up with as a cheap way to
 * implement recovery after a job timeout.
 *
 * This turned out not to work very well. First of all there are many
 * problems with the dma_fence implementation and requirements. Either the
 * implementation risks deadlocks with core memory management or violates
 * documented implementation details of the dma_fence object.
 *
 * Drivers can still save and restore their state for recovery operations, but
 * we shouldn't make this a general scheduler feature around the dma_fence
 * interface.
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;
	struct dma_fence *fence;

	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		fence = sched->ops->run_job(s_job);

		if (IS_ERR_OR_NULL(fence)) {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			s_job->s_fence->parent = NULL;
		} else {

			s_job->s_fence->parent = dma_fence_get(fence);

			/* Drop for the original kref_init */
			dma_fence_put(fence);
		}
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);

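/*
 * Note on the lifetime rules implemented by the functions below: a job that
 * was successfully initialized but never armed must be unwound with
 * drm_sched_job_cleanup(); once armed, the job belongs to the scheduler and
 * is cleaned up from the free_job callback instead. A hedged sketch
 * (my_validate() is a hypothetical driver step that may fail):
 *
 *	ret = drm_sched_job_init(job, entity, owner);
 *	if (ret)
 *		return ret;
 *
 *	ret = my_validate(job);
 *	if (ret) {
 *		drm_sched_job_cleanup(job);	// legal: not armed yet
 *		return ret;
 *	}
 *
 *	drm_sched_job_arm(job);			// from here on, only
 *						// free_job() cleans up
 */
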
/**
 * drm_sched_job_init - init a scheduler job
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Drivers must make sure to call drm_sched_job_cleanup() if this function
 * returns successfully, even when @job is aborted before drm_sched_job_arm()
 * is called.
 *
 * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
 * has died, which can mean that there's no valid runqueue for an @entity.
 * This function returns -ENOENT in this case (which probably should be -EIO as
 * a more meaningful return value).
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	if (!entity->rq)
		return -ENOENT;

	job->entity = entity;
	job->s_fence = drm_sched_fence_alloc(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;

	INIT_LIST_HEAD(&job->list);

	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);

/**
 * drm_sched_job_arm - arm a scheduler job for execution
 * @job: scheduler job to arm
 *
 * This arms a scheduler job for execution. Specifically it initializes the
 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
 * or other places that need to track the completion of this job.
 *
 * Refer to drm_sched_entity_push_job() documentation for locking
 * considerations.
 *
 * This can only be called if drm_sched_job_init() succeeded.
 */
void drm_sched_job_arm(struct drm_sched_job *job)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_entity *entity = job->entity;

	BUG_ON(!entity);
	drm_sched_entity_select_rq(entity);
	sched = entity->rq->sched;

	job->sched = sched;
	job->s_priority = entity->rq - sched->sched_rq;
	job->id = atomic64_inc_return(&sched->job_id_count);

	drm_sched_fence_init(job->s_fence, job->entity);
}
EXPORT_SYMBOL(drm_sched_job_arm);

/**
 * drm_sched_job_add_dependency - adds the fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Note that @fence is consumed in both the success and error cases.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(&job->dependencies, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_sched_job_add_dependency);

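/*
 * Because drm_sched_job_add_dependency() consumes the fence reference in
 * both the success and error cases, a caller that wants to keep its own
 * reference has to take an extra one first. A hedged sketch of waiting on an
 * in-fence passed in from userspace (variable names are illustrative):
 *
 *	struct dma_fence *in_fence = sync_file_get_fence(in_fence_fd);
 *
 *	if (!in_fence)
 *		return -EINVAL;
 *
 *	// the reference is handed over to the job, even on failure
 *	ret = drm_sched_job_add_dependency(job, in_fence);
 *	if (ret)
 *		return ret;
 */
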
/**
 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
 * @job: scheduler job to add the dependencies to
 * @resv: the dma_resv object to get the fences from
 * @usage: the dma_resv_usage to use to filter the fences
 *
 * This adds all fences matching the given usage from @resv to @job.
 * Must be called with the @resv lock held.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	dma_resv_assert_held(resv);

	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
		/* Make sure to grab an additional ref on the added fence */
		dma_fence_get(fence);
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			dma_fence_put(fence);
			return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);

/**
 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
 *   dependencies
 * @job: scheduler job to add the dependencies to
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write)
{
	return drm_sched_job_add_resv_dependencies(job, obj->resv,
						   dma_resv_usage_rw(write));
}
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);

/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 * @job: scheduler job to clean up
 *
 * Cleans up the resources allocated with drm_sched_job_init().
 *
 * Drivers should call this from their error unwind code if @job is aborted
 * before drm_sched_job_arm() is called.
 *
 * After that point of no return @job is committed to be executed by the
 * scheduler, and this function should be called from the
 * &drm_sched_backend_ops.free_job callback.
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	struct dma_fence *fence;
	unsigned long index;

	if (kref_read(&job->s_fence->finished.refcount)) {
		/* drm_sched_job_arm() has been called */
		dma_fence_put(&job->s_fence->finished);
	} else {
		/* aborted job before committing to run it */
		drm_sched_fence_free(job->s_fence);
	}

	job->s_fence = NULL;

	xa_for_each(&job->dependencies, index, fence) {
		dma_fence_put(fence);
	}
	xa_destroy(&job->dependencies);
}
EXPORT_SYMBOL(drm_sched_job_cleanup);

/**
 * drm_sched_ready - is the scheduler ready
 *
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * drm_sched_wakeup - Wake up the scheduler when it is ready
 *
 * @sched: scheduler instance
 *
 */
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
			drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
			drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

/**
 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
 *
 * @sched: scheduler instance
 *
 * Returns the next finished job from the pending list (if there is one)
 * ready to be destroyed.
 */
static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job, *next;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
		/* remove job from pending_list */
		list_del_init(&job->list);

		/* cancel this job's TO timer */
		cancel_delayed_work(&sched->work_tdr);
		/* make the scheduled timestamp more accurate */
		next = list_first_entry_or_null(&sched->pending_list,
						typeof(*next), list);

		if (next) {
			next->s_fence->scheduled.timestamp =
				job->s_fence->finished.timestamp;
			/* start TO timer for next job */
			drm_sched_start_timeout(sched);
		}
	} else {
		job = NULL;
	}

	spin_unlock(&sched->job_list_lock);

	return job;
}

/**
 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
 * @sched_list: list of drm_gpu_schedulers
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
 *
 * Returns a pointer to the sched with the least load, or NULL if none of the
 * drm_gpu_schedulers are ready.
 */
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
	int i;
	unsigned int min_score = UINT_MAX, num_score;

	for (i = 0; i < num_sched_list; ++i) {
		sched = sched_list[i];

		if (!sched->ready) {
			DRM_WARN("scheduler %s is not ready, skipping",
				 sched->name);
			continue;
		}

		num_score = atomic_read(sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			picked_sched = sched;
		}
	}

	return picked_sched;
}
EXPORT_SYMBOL(drm_sched_pick_best);

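/*
 * drm_sched_pick_best() is what drm_sched_entity_select_rq() uses to
 * load-balance an entity across the schedulers it was initialized with. A
 * driver can also call it directly; a sketch (the array contents are
 * hypothetical):
 *
 *	struct drm_gpu_scheduler *scheds[] = {
 *		&adev->ring[0].sched, &adev->ring[1].sched,
 *	};
 *	struct drm_gpu_scheduler *best;
 *
 *	best = drm_sched_pick_best(scheds, ARRAY_SIZE(scheds));
 *	if (!best)
 *		return -ENODEV;	// no scheduler is ready
 */
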
/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_set_fifo_low(current);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;
		struct drm_sched_job *cleanup_job = NULL;

		wait_event_interruptible(sched->wake_up_worker,
					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (cleanup_job)
			sched->ops->free_job(cleanup_job);

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);

		if (!sched_job) {
			complete_all(&entity->entity_idle);
			continue;
		}

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		trace_drm_run_job(sched_job, entity);
		fence = sched->ops->run_job(sched_job);
		complete_all(&entity->entity_idle);
		drm_sched_fence_scheduled(s_fence);

		if (!IS_ERR_OR_NULL(fence)) {
			s_fence->parent = dma_fence_get(fence);
			/* Drop for original kref_init of the fence */
			dma_fence_put(fence);

			r = dma_fence_add_callback(fence, &sched_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(sched_job);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					      r);
		} else {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			drm_sched_job_done(sched_job);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

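/*
 * A hedged sketch of how a driver wires up the pieces consumed by
 * drm_sched_main() above (the my_* callbacks are hypothetical; the ops table
 * fields are the real &struct drm_sched_backend_ops callbacks):
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job	= my_run_job,		// push job to HW ring
 *		.timedout_job	= my_timedout_job,	// TDR entry point
 *		.free_job	= my_free_job,		// final job cleanup
 *	};
 *
 *	ret = drm_sched_init(&ring->sched, &my_sched_ops,
 *			     num_hw_submission, 0, msecs_to_jiffies(10000),
 *			     NULL, NULL, ring->name, adev->dev);
 */
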
/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
 *		used
 * @score: optional score atomic shared with other schedulers
 * @name: name used for debugging
 * @dev: target &struct device
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev)
{
	int i, ret;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->timeout_wq = timeout_wq ? : system_wq;
	sched->hang_limit = hang_limit;
	sched->score = score ? score : &sched->_score;
	sched->dev = dev;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->pending_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->_score, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
		return ret;
	}

	sched->ready = true;
	return 0;
}
EXPORT_SYMBOL(drm_sched_init);

/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *s_entity;
	int i;

	if (sched->thread)
		kthread_stop(sched->thread);

	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		if (!rq)
			continue;

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list)
			/*
			 * Prevents reinsertion and marks job_queue as idle;
			 * it will be removed from the rq in
			 * drm_sched_entity_fini() eventually.
			 */
			s_entity->stopped = true;
		spin_unlock(&rq->lock);
	}

	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
	wake_up_all(&sched->job_scheduled);

	/* Confirm no work left behind accessing device structures */
	cancel_delayed_work_sync(&sched->work_tdr);

	sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);

/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty
 * and jobs from it will not be scheduled further.
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* don't change @bad's karma if it's from the KERNEL RQ,
	 * because sometimes a GPU hang would cause kernel jobs (like VM
	 * updating jobs) to be corrupted, but keep in mind that kernel jobs
	 * are always considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);

		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
		     i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (entity->guilty)
						atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma);