Lines Matching refs:sched
127 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched, in drm_sched_rq_init() argument
134 rq->sched = sched; in drm_sched_rq_init()
153 atomic_inc(rq->sched->score); in drm_sched_rq_add_entity()
175 atomic_dec(rq->sched->score); in drm_sched_rq_remove_entity()
268 struct drm_gpu_scheduler *sched = s_fence->sched; in drm_sched_job_done() local
270 atomic_dec(&sched->hw_rq_count); in drm_sched_job_done()
271 atomic_dec(sched->score); in drm_sched_job_done()
278 wake_up_interruptible(&sched->wake_up_worker); in drm_sched_job_done()
300 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched) in drm_sched_start_timeout() argument
302 if (sched->timeout != MAX_SCHEDULE_TIMEOUT && in drm_sched_start_timeout()
303 !list_empty(&sched->pending_list)) in drm_sched_start_timeout()
304 queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout); in drm_sched_start_timeout()
314 void drm_sched_fault(struct drm_gpu_scheduler *sched) in drm_sched_fault() argument
316 if (sched->timeout_wq) in drm_sched_fault()
317 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0); in drm_sched_fault()
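The drm_sched_fault() matches above show it simply re-queues work_tdr with no delay, so a driver can force the timeout handler from its fault interrupt instead of waiting for sched->timeout. A minimal sketch, assuming a hypothetical my_gpu driver that embeds a drm_gpu_scheduler and already includes <linux/interrupt.h> and <drm/gpu_scheduler.h>:

        static irqreturn_t my_gpu_fault_irq(int irq, void *data)
        {
                struct my_gpu *gpu = data;      /* placeholder driver structure */

                /* Run the scheduler's timeout handling right away rather than
                 * waiting for the regular job timeout to expire. */
                drm_sched_fault(&gpu->sched);
                return IRQ_HANDLED;
        }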
333 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched) in drm_sched_suspend_timeout() argument
337 sched_timeout = sched->work_tdr.timer.expires; in drm_sched_suspend_timeout()
343 if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT) in drm_sched_suspend_timeout()
347 return sched->timeout; in drm_sched_suspend_timeout()
359 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched, in drm_sched_resume_timeout() argument
362 spin_lock(&sched->job_list_lock); in drm_sched_resume_timeout()
364 if (list_empty(&sched->pending_list)) in drm_sched_resume_timeout()
365 cancel_delayed_work(&sched->work_tdr); in drm_sched_resume_timeout()
367 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining); in drm_sched_resume_timeout()
369 spin_unlock(&sched->job_list_lock); in drm_sched_resume_timeout()
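drm_sched_suspend_timeout() above parks work_tdr and hands back how much of the timeout was left; drm_sched_resume_timeout() re-arms the timer with that remainder, or cancels it if pending_list is now empty. A hedged sketch of bracketing driver work with the pair, where my_gpu_do_reset() stands in for whatever should not count against the job timeout:

        static void my_gpu_quiesce(struct my_gpu *gpu)
        {
                unsigned long remaining;

                /* Stop the clock on the current job's timeout... */
                remaining = drm_sched_suspend_timeout(&gpu->sched);

                my_gpu_do_reset(gpu);           /* placeholder hardware work */

                /* ...and resume it with whatever time was left. */
                drm_sched_resume_timeout(&gpu->sched, remaining);
        }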
375 struct drm_gpu_scheduler *sched = s_job->sched; in drm_sched_job_begin() local
377 spin_lock(&sched->job_list_lock); in drm_sched_job_begin()
378 list_add_tail(&s_job->list, &sched->pending_list); in drm_sched_job_begin()
379 drm_sched_start_timeout(sched); in drm_sched_job_begin()
380 spin_unlock(&sched->job_list_lock); in drm_sched_job_begin()
385 struct drm_gpu_scheduler *sched; in drm_sched_job_timedout() local
389 sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work); in drm_sched_job_timedout()
392 spin_lock(&sched->job_list_lock); in drm_sched_job_timedout()
393 job = list_first_entry_or_null(&sched->pending_list, in drm_sched_job_timedout()
403 spin_unlock(&sched->job_list_lock); in drm_sched_job_timedout()
405 status = job->sched->ops->timedout_job(job); in drm_sched_job_timedout()
411 if (sched->free_guilty) { in drm_sched_job_timedout()
412 job->sched->ops->free_job(job); in drm_sched_job_timedout()
413 sched->free_guilty = false; in drm_sched_job_timedout()
416 spin_unlock(&sched->job_list_lock); in drm_sched_job_timedout()
420 spin_lock(&sched->job_list_lock); in drm_sched_job_timedout()
421 drm_sched_start_timeout(sched); in drm_sched_job_timedout()
422 spin_unlock(&sched->job_list_lock); in drm_sched_job_timedout()
438 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) in drm_sched_stop() argument
442 kthread_park(sched->thread); in drm_sched_stop()
451 if (bad && bad->sched == sched) in drm_sched_stop()
456 list_add(&bad->list, &sched->pending_list); in drm_sched_stop()
464 list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list, in drm_sched_stop()
471 atomic_dec(&sched->hw_rq_count); in drm_sched_stop()
477 spin_lock(&sched->job_list_lock); in drm_sched_stop()
479 spin_unlock(&sched->job_list_lock); in drm_sched_stop()
495 sched->ops->free_job(s_job); in drm_sched_stop()
497 sched->free_guilty = true; in drm_sched_stop()
507 cancel_delayed_work(&sched->work_tdr); in drm_sched_stop()
519 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery) in drm_sched_start() argument
529 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) { in drm_sched_start()
532 atomic_inc(&sched->hw_rq_count); in drm_sched_start()
543 DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", in drm_sched_start()
550 spin_lock(&sched->job_list_lock); in drm_sched_start()
551 drm_sched_start_timeout(sched); in drm_sched_start()
552 spin_unlock(&sched->job_list_lock); in drm_sched_start()
555 kthread_unpark(sched->thread); in drm_sched_start()
576 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) in drm_sched_resubmit_jobs() argument
583 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) { in drm_sched_resubmit_jobs()
586 if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) { in drm_sched_resubmit_jobs()
594 fence = sched->ops->run_job(s_job); in drm_sched_resubmit_jobs()
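Together, the drm_sched_job_timedout(), drm_sched_stop(), drm_sched_start() and drm_sched_resubmit_jobs() matches above form the timeout-and-recovery path. As a hedged illustration of how a driver's timedout_job callback typically chains them (not taken from this file; my_gpu, to_my_gpu() and my_gpu_reset() are placeholders):

        static enum drm_gpu_sched_stat
        my_sched_timedout_job(struct drm_sched_job *sched_job)
        {
                struct drm_gpu_scheduler *sched = sched_job->sched;
                struct my_gpu *gpu = to_my_gpu(sched);  /* placeholder container_of helper */

                /* Park the scheduler thread and detach the done callbacks
                 * (what drm_sched_stop() above does). */
                drm_sched_stop(sched, sched_job);

                /* Record the hang so resubmission can mark the job guilty
                 * once its karma exceeds hang_limit. */
                drm_sched_increase_karma(sched_job);

                my_gpu_reset(gpu);                      /* placeholder hardware reset */

                /* Re-queue everything still on pending_list, then unpark. */
                drm_sched_resubmit_jobs(sched);
                drm_sched_start(sched, true);

                return DRM_GPU_SCHED_STAT_NOMINAL;
        }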
674 struct drm_gpu_scheduler *sched; in drm_sched_job_arm() local
679 sched = entity->rq->sched; in drm_sched_job_arm()
681 job->sched = sched; in drm_sched_job_arm()
682 job->s_priority = entity->rq - sched->sched_rq; in drm_sched_job_arm()
683 job->id = atomic64_inc_return(&sched->job_id_count); in drm_sched_job_arm()
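drm_sched_job_arm() is where a job gets bound to its scheduler and picks up s_priority and id (lines 681-683 above). A hedged sketch of the submit path it sits in, with the ioctl plumbing elided and my_context/my_job as placeholder driver types:

        int my_submit(struct my_context *ctx, struct my_job *job)
        {
                int ret;

                ret = drm_sched_job_init(&job->base, &ctx->entity, ctx);
                if (ret)
                        return ret;

                /* ... reserve objects, add fences with drm_sched_job_add_dependency() ... */

                drm_sched_job_arm(&job->base);          /* binds job->sched, s_priority, id */
                drm_sched_entity_push_job(&job->base);
                return 0;
        }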
864 static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched) in drm_sched_can_queue() argument
866 return atomic_read(&sched->hw_rq_count) < in drm_sched_can_queue()
867 sched->hw_submission_limit; in drm_sched_can_queue()
876 void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched) in drm_sched_wakeup_if_can_queue() argument
878 if (drm_sched_can_queue(sched)) in drm_sched_wakeup_if_can_queue()
879 wake_up_interruptible(&sched->wake_up_worker); in drm_sched_wakeup_if_can_queue()
890 drm_sched_select_entity(struct drm_gpu_scheduler *sched) in drm_sched_select_entity() argument
895 if (!drm_sched_can_queue(sched)) in drm_sched_select_entity()
901 drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) : in drm_sched_select_entity()
902 drm_sched_rq_select_entity_rr(&sched->sched_rq[i]); in drm_sched_select_entity()
919 drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) in drm_sched_get_cleanup_job() argument
923 spin_lock(&sched->job_list_lock); in drm_sched_get_cleanup_job()
925 job = list_first_entry_or_null(&sched->pending_list, in drm_sched_get_cleanup_job()
933 cancel_delayed_work(&sched->work_tdr); in drm_sched_get_cleanup_job()
935 next = list_first_entry_or_null(&sched->pending_list, in drm_sched_get_cleanup_job()
942 drm_sched_start_timeout(sched); in drm_sched_get_cleanup_job()
948 spin_unlock(&sched->job_list_lock); in drm_sched_get_cleanup_job()
965 struct drm_gpu_scheduler *sched, *picked_sched = NULL; in drm_sched_pick_best() local
970 sched = sched_list[i]; in drm_sched_pick_best()
972 if (!sched->ready) { in drm_sched_pick_best()
974 sched->name); in drm_sched_pick_best()
978 num_score = atomic_read(sched->score); in drm_sched_pick_best()
981 picked_sched = sched; in drm_sched_pick_best()
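drm_sched_pick_best() is what load-balances an entity across the schedulers in its sched_list by comparing each scheduler's score. A hedged sketch of setting that up through drm_sched_entity_init(); gpu->sched_list and gpu->num_scheds are placeholders, and the array must outlive the entity because the scheduler keeps the pointer when more than one entry is passed:

        int my_open_context(struct my_gpu *gpu, struct my_context *ctx)
        {
                /* With num_sched_list > 1 the entity is re-balanced onto the
                 * least-loaded scheduler via drm_sched_pick_best(). */
                return drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
                                             gpu->sched_list, gpu->num_scheds, NULL);
        }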
996 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched) in drm_sched_blocked() argument
1015 struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param; in drm_sched_main() local
1027 wait_event_interruptible(sched->wake_up_worker, in drm_sched_main()
1028 (cleanup_job = drm_sched_get_cleanup_job(sched)) || in drm_sched_main()
1029 (!drm_sched_blocked(sched) && in drm_sched_main()
1030 (entity = drm_sched_select_entity(sched))) || in drm_sched_main()
1034 sched->ops->free_job(cleanup_job); in drm_sched_main()
1048 atomic_inc(&sched->hw_rq_count); in drm_sched_main()
1052 fence = sched->ops->run_job(sched_job); in drm_sched_main()
1065 DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", in drm_sched_main()
1072 wake_up(&sched->job_scheduled); in drm_sched_main()
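drm_sched_main() above is the kthread body: it waits for work, frees finished jobs, selects an entity, calls run_job() and hooks drm_sched_job_done() onto the returned hardware fence. A hedged sketch of the driver half of that contract, with to_my_job() and my_gpu_submit() as placeholders:

        static struct dma_fence *my_sched_run_job(struct drm_sched_job *sched_job)
        {
                struct my_job *job = to_my_job(sched_job);      /* placeholder wrapper */

                /* Kick the hardware; the fence we return signals when the job
                 * retires, which is what drives drm_sched_job_done(). Error
                 * pointers are also accepted and propagated by the main loop. */
                return my_gpu_submit(job);
        }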
1093 int drm_sched_init(struct drm_gpu_scheduler *sched, in drm_sched_init() argument
1100 sched->ops = ops; in drm_sched_init()
1101 sched->hw_submission_limit = hw_submission; in drm_sched_init()
1102 sched->name = name; in drm_sched_init()
1103 sched->timeout = timeout; in drm_sched_init()
1104 sched->timeout_wq = timeout_wq ? : system_wq; in drm_sched_init()
1105 sched->hang_limit = hang_limit; in drm_sched_init()
1106 sched->score = score ? score : &sched->_score; in drm_sched_init()
1107 sched->dev = dev; in drm_sched_init()
1109 drm_sched_rq_init(sched, &sched->sched_rq[i]); in drm_sched_init()
1111 init_waitqueue_head(&sched->wake_up_worker); in drm_sched_init()
1112 init_waitqueue_head(&sched->job_scheduled); in drm_sched_init()
1113 INIT_LIST_HEAD(&sched->pending_list); in drm_sched_init()
1114 spin_lock_init(&sched->job_list_lock); in drm_sched_init()
1115 atomic_set(&sched->hw_rq_count, 0); in drm_sched_init()
1116 INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout); in drm_sched_init()
1117 atomic_set(&sched->_score, 0); in drm_sched_init()
1118 atomic64_set(&sched->job_id_count, 0); in drm_sched_init()
1121 sched->thread = kthread_run(drm_sched_main, sched, sched->name); in drm_sched_init()
1122 if (IS_ERR(sched->thread)) { in drm_sched_init()
1123 ret = PTR_ERR(sched->thread); in drm_sched_init()
1124 sched->thread = NULL; in drm_sched_init()
1125 DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name); in drm_sched_init()
1129 sched->ready = true; in drm_sched_init()
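The drm_sched_init() matches above show the defaulting rules: a NULL timeout_wq falls back to system_wq and a NULL score to the scheduler's own _score counter. A minimal bring-up sketch under those assumptions; the callback and structure names are placeholders, and the parameter order matches this listing's era of the API:

        static const struct drm_sched_backend_ops my_sched_ops = {
                .run_job        = my_sched_run_job,
                .timedout_job   = my_sched_timedout_job,
                .free_job       = my_sched_free_job,
        };

        int my_ring_init(struct my_gpu *gpu)
        {
                return drm_sched_init(&gpu->sched, &my_sched_ops,
                                      64,                       /* hw_submission limit */
                                      2,                        /* hang_limit */
                                      msecs_to_jiffies(500),    /* job timeout */
                                      NULL, NULL,               /* timeout_wq, score: defaults */
                                      "my-ring", gpu->dev);
        }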
1141 void drm_sched_fini(struct drm_gpu_scheduler *sched) in drm_sched_fini() argument
1146 if (sched->thread) in drm_sched_fini()
1147 kthread_stop(sched->thread); in drm_sched_fini()
1150 struct drm_sched_rq *rq = &sched->sched_rq[i]; in drm_sched_fini()
1165 wake_up_all(&sched->job_scheduled); in drm_sched_fini()
1168 cancel_delayed_work_sync(&sched->work_tdr); in drm_sched_fini()
1170 sched->ready = false; in drm_sched_fini()
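Teardown mirrors bring-up: entities are torn down first, then the scheduler itself (drm_sched_fini() above stops the kthread, marks any entities still on the run queues as stopped, and cancels work_tdr). A hedged sketch with the same placeholder names:

        void my_ring_fini(struct my_gpu *gpu, struct my_context *ctx)
        {
                /* Flush and detach the entity before stopping the kthread that feeds it. */
                drm_sched_entity_destroy(&ctx->entity);
                drm_sched_fini(&gpu->sched);
        }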
1188 struct drm_gpu_scheduler *sched = bad->sched; in drm_sched_increase_karma() local
1199 struct drm_sched_rq *rq = &sched->sched_rq[i]; in drm_sched_increase_karma()