/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects the
 * entities from the run queue using a FIFO. The scheduler provides dependency
 * handling features among jobs. The driver is expected to provide callback
 * functions for backend operations to the scheduler, such as submitting a job
 * to the hardware run queue or returning the dependencies of a job.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed.
 */
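
/**
 * DOC: Backend callbacks example
 *
 * A minimal, illustrative sketch of the backend operations a driver is
 * expected to provide. All my_*() helpers and the to_my_job() container
 * accessor are hypothetical; only the callback names and signatures follow
 * &struct drm_sched_backend_ops as used by this file.
 *
 * .. code-block:: c
 *
 *	static struct dma_fence *my_job_dependency(struct drm_sched_job *sched_job,
 *						   struct drm_sched_entity *s_entity)
 *	{
 *		return my_job_next_unsignaled_dep(to_my_job(sched_job));
 *	}
 *
 *	static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
 *	{
 *		return my_ring_submit(to_my_job(sched_job));
 *	}
 *
 *	static void my_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		my_gpu_recover(to_my_job(sched_job)->mdev);
 *	}
 *
 *	static void my_free_job(struct drm_sched_job *sched_job)
 *	{
 *		drm_sched_job_cleanup(sched_job);
 *		kfree(to_my_job(sched_job));
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.dependency = my_job_dependency,
 *		.run_job = my_run_job,
 *		.timedout_job = my_timedout_job,
 *		.free_job = my_free_job,
 *	};
 *
 * drm_sched_init() takes a pointer to such an ops table; see the lifecycle
 * sketch at the end of this file.
 */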

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance the run queue belongs to
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
	rq->sched = sched;
}

/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_select_entity - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * drm_sched_dependency_optimized - check whether the dependency can be optimized
 *
 * @fence: the dependency fence
 * @entity: the entity which depends on the above fence
 *
 * Returns true if the dependency can be optimized and false otherwise.
 */
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}
EXPORT_SYMBOL(drm_sched_dependency_optimized);

/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->ring_mirror_list))
		schedule_delayed_work(&sched->work_tdr, sched->timeout);
}

/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	mod_delayed_work(system_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);

/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
 * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
 * called from an IRQ context.
 *
 * Returns the timeout remaining.
 *
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

	sched_timeout = sched->work_tdr.timer.expires;

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive.
	 */
	if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);

/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler. Note that
 * this function can be called from an IRQ context.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining)
{
	unsigned long flags;

	spin_lock_irqsave(&sched->job_list_lock, flags);

	if (list_empty(&sched->ring_mirror_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(system_wq, &sched->work_tdr, remaining);

	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
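
/**
 * DOC: Suspending the job timeout
 *
 * An illustrative sketch of how a driver might pair the two helpers above
 * around an operation that is expected to keep the hardware busy for longer
 * than the configured timeout; my_long_hw_maintenance() and mdev are
 * hypothetical:
 *
 * .. code-block:: c
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(&mdev->sched);
 *	my_long_hw_maintenance(mdev);
 *	drm_sched_resume_timeout(&mdev->sched, remaining);
 */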

static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;
	unsigned long flags;

	spin_lock_irqsave(&sched->job_list_lock, flags);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	drm_sched_start_timeout(sched);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;
	unsigned long flags;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
	job = list_first_entry_or_null(&sched->ring_mirror_list,
				       struct drm_sched_job, node);

	if (job) {
		job->sched->ops->timedout_job(job);

		/*
		 * Guilty job did complete and hence needs to be manually removed.
		 * See the drm_sched_stop() documentation.
		 */
		if (sched->free_guilty) {
			job->sched->ops->free_job(job);
			sched->free_guilty = false;
		}
	}

	spin_lock_irqsave(&sched->job_list_lock, flags);
	drm_sched_start_timeout(sched);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}

/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty and
 * jobs from it will not be scheduled further.
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* don't increase @bad's karma if it's from the KERNEL RQ, because a GPU
	 * hang can leave kernel jobs (like VM updating jobs) corrupted, but
	 * kernel jobs are always considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);
		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
		     i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (atomic_read(&bad->karma) >
					    bad->sched->hang_limit)
						if (entity->guilty)
							atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma);

/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: job which caused the time out
 *
 * Stops the scheduler and also removes and frees all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it is the
 * caller's responsibility to release it manually if it is no longer part of
 * the mirror list.
 *
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;
	unsigned long flags;

	kthread_park(sched->thread);

	/*
	 * Iterate the job list from later to earlier one and either deactivate
	 * their HW callbacks or remove them from the mirror list if they have
	 * already signaled.
	 * This iteration is thread safe as the sched thread is stopped.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			atomic_dec(&sched->hw_rq_count);
		} else {
			/*
			 * remove job from ring_mirror_list.
			 * Locking here is for concurrent resume timeout
			 */
			spin_lock_irqsave(&sched->job_list_lock, flags);
			list_del_init(&s_job->node);
			spin_unlock_irqrestore(&sched->job_list_lock, flags);

			/*
			 * Wait for job's HW fence callback to finish using s_job
			 * before releasing it.
			 *
			 * Job is still alive so fence refcount at least 1
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep bad job alive for later use during
			 * recovery by some of the drivers but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop the pending timer in flight as we rearm it in drm_sched_start().
	 * This prevents the pending timeout work from firing right away after
	 * this TDR finished and before the newly restarted jobs had a
	 * chance to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}
EXPORT_SYMBOL(drm_sched_stop);

/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 *
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	unsigned long flags;
	int r;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being inserted or removed. Also concurrent
	 * GPU recovers can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct dma_fence *fence = s_job->s_fence->parent;

		atomic_inc(&sched->hw_rq_count);

		if (!full_recovery)
			continue;

		if (fence) {
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_process_job);
			if (r == -ENOENT)
				drm_sched_process_job(fence, &s_job->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
		} else
			drm_sched_process_job(NULL, &s_job->cb);
	}

	if (full_recovery) {
		spin_lock_irqsave(&sched->job_list_lock, flags);
		drm_sched_start_timeout(sched);
		spin_unlock_irqrestore(&sched->job_list_lock, flags);
	}

	kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);

/**
 * drm_sched_resubmit_jobs - helper to relaunch jobs from the mirror ring list
 *
 * @sched: scheduler instance
 *
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		dma_fence_put(s_job->s_fence->parent);
		s_job->s_fence->parent = sched->ops->run_job(s_job);
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
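
/**
 * DOC: Job recovery example
 *
 * A sketch of one possible ordering of the recovery helpers above, as a
 * driver's timeout/reset path might combine them; my_hw_reset() and the
 * surrounding context are hypothetical:
 *
 * .. code-block:: c
 *
 *	drm_sched_stop(&mdev->sched, bad_job);
 *	drm_sched_increase_karma(bad_job);
 *
 *	my_hw_reset(mdev);
 *
 *	drm_sched_resubmit_jobs(&mdev->sched);
 *	drm_sched_start(&mdev->sched, true);
 */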

/**
 * drm_sched_job_init - init a scheduler job
 *
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	struct drm_gpu_scheduler *sched;

	drm_sched_entity_select_rq(entity);
	if (!entity->rq)
		return -ENOENT;

	sched = entity->rq->sched;

	job->sched = sched;
	job->entity = entity;
	job->s_priority = entity->rq - sched->sched_rq;
	job->s_fence = drm_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_LIST_HEAD(&job->node);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);

/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 *
 * @job: scheduler job to clean up
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	dma_fence_put(&job->s_fence->finished);
	job->s_fence = NULL;
}
EXPORT_SYMBOL(drm_sched_job_cleanup);

/**
 * drm_sched_ready - is the scheduler ready
 *
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * drm_sched_wakeup - Wake up the scheduler when it is ready
 *
 * @sched: scheduler instance
 *
 */
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

/**
 * drm_sched_process_job - process a job
 *
 * @f: fence
 * @cb: fence callbacks
 *
 * Called after job has finished execution.
 */
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	atomic_dec(&sched->num_jobs);

	trace_drm_sched_process_job(s_fence);

	drm_sched_fence_finished(s_fence);
	wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_cleanup_jobs - destroy finished jobs
 *
 * @sched: scheduler instance
 *
 * Remove all finished jobs from the mirror list and destroy them.
 */
static void drm_sched_cleanup_jobs(struct drm_gpu_scheduler *sched)
{
	unsigned long flags;

	/* Don't destroy jobs while the timeout worker is running */
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !cancel_delayed_work(&sched->work_tdr))
		return;

	while (!list_empty(&sched->ring_mirror_list)) {
		struct drm_sched_job *job;

		job = list_first_entry(&sched->ring_mirror_list,
				       struct drm_sched_job, node);
		if (!dma_fence_is_signaled(&job->s_fence->finished))
			break;

		spin_lock_irqsave(&sched->job_list_lock, flags);
		/* remove job from ring_mirror_list */
		list_del_init(&job->node);
		spin_unlock_irqrestore(&sched->job_list_lock, flags);

		sched->ops->free_job(job);
	}

	/* queue timeout for next job */
	spin_lock_irqsave(&sched->job_list_lock, flags);
	drm_sched_start_timeout(sched);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}

/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (drm_sched_cleanup_jobs(sched),
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop()));

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		drm_sched_fence_scheduled(s_fence);

		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &sched_job->cb,
						   drm_sched_process_job);
			if (r == -ENOENT)
				drm_sched_process_job(fence, &sched_job->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else
			drm_sched_process_job(NULL, &sched_job->cb);

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @name: name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission,
		   unsigned hang_limit,
		   long timeout,
		   const char *name)
{
	int i, ret;
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->hang_limit = hang_limit;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->num_jobs, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return ret;
	}

	sched->ready = true;
	return 0;
}
EXPORT_SYMBOL(drm_sched_init);

/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);

	sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);
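
/**
 * DOC: Scheduler lifecycle example
 *
 * A minimal, illustrative sketch of the surrounding driver code: one
 * scheduler per hardware ring, created at device init with a queue depth of
 * 4, a hang limit of 2 and a one second timeout, torn down at device fini,
 * and fed jobs through an entity. Everything prefixed with my_ is
 * hypothetical, my_sched_ops is the ops table sketched near the top of this
 * file, and drm_sched_entity_push_job() (implemented in sched_entity.c) is
 * shown in the two-argument form used by this version of the API.
 *
 * .. code-block:: c
 *
 *	int my_ring_init(struct my_device *mdev)
 *	{
 *		return drm_sched_init(&mdev->sched, &my_sched_ops, 4, 2,
 *				      msecs_to_jiffies(1000), "my-ring");
 *	}
 *
 *	void my_ring_fini(struct my_device *mdev)
 *	{
 *		drm_sched_fini(&mdev->sched);
 *	}
 *
 *	int my_submit(struct my_context *ctx, struct my_job *job)
 *	{
 *		int ret;
 *
 *		ret = drm_sched_job_init(&job->base, &ctx->entity, ctx);
 *		if (ret)
 *			return ret;
 *
 *		drm_sched_entity_push_job(&job->base, &ctx->entity);
 *		return 0;
 *	}
 */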