/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects the
 * entities from the run queue using a FIFO. The scheduler provides dependency
 * handling features among jobs. The driver is supposed to provide callback
 * functions for backend operations to the scheduler, like submitting a job to
 * the hardware run queue, returning the dependencies of a job etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed.
 */
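
/*
 * Illustrative driver-side setup, added here as a hedged sketch only: it is
 * not part of the scheduler implementation, and every "my_"-prefixed name is
 * hypothetical. It shows the typical flow of wiring backend callbacks into a
 * scheduler (one per hardware ring) and binding an entity to it; the exact
 * drm_sched_entity_init() signature assumed here may differ between kernel
 * versions.
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.dependency   = my_job_dependency,
 *		.run_job      = my_job_run,
 *		.timedout_job = my_job_timedout,
 *		.free_job     = my_job_free,
 *	};
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &my_ring->sched };
 *	int r;
 *
 *	r = drm_sched_init(&my_ring->sched, &my_sched_ops,
 *			   my_hw_submission_limit, my_hang_limit,
 *			   msecs_to_jiffies(my_timeout_ms), my_ring->name);
 *	if (r)
 *		return r;
 *
 *	r = drm_sched_entity_init(&my_ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *				  sched_list, ARRAY_SIZE(sched_list), NULL);
 */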

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance to associate with this run queue
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
	rq->sched = sched;
}

/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	atomic_inc(&rq->sched->score);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	atomic_dec(&rq->sched->score);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_select_entity - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				reinit_completion(&entity->entity_idle);
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * drm_sched_job_done - complete a job
 * @s_job: pointer to the job which is done
 *
 * Finish the job's fence and wake up the worker thread.
 */
static void drm_sched_job_done(struct drm_sched_job *s_job)
{
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	atomic_dec(&sched->score);

	trace_drm_sched_process_job(s_fence);

	dma_fence_get(&s_fence->finished);
	drm_sched_fence_finished(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_job_done_cb - the callback for a done job
 * @f: fence
 * @cb: fence callback embedded in the job
 */
static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);

	drm_sched_job_done(s_job);
}

/**
 * drm_sched_dependency_optimized - test if the dependency can be optimized
 *
 * @fence: the dependency fence
 * @entity: the entity which depends on the above fence
 *
 * Returns true if the dependency can be optimized and false otherwise
 */
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}
EXPORT_SYMBOL(drm_sched_dependency_optimized);

/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->pending_list))
		schedule_delayed_work(&sched->work_tdr, sched->timeout);
}

/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	mod_delayed_work(system_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);
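
/*
 * A hedged usage sketch (not part of this file; the "my_" names are
 * hypothetical): a driver's fault/hang interrupt handler can kick timeout
 * handling right away instead of waiting for the regular job timeout to
 * expire.
 *
 *	static irqreturn_t my_fault_irq(int irq, void *arg)
 *	{
 *		struct my_ring *ring = arg;
 *
 *		drm_sched_fault(&ring->sched);
 *		return IRQ_HANDLED;
 *	}
 */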

/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
 * MAX_SCHEDULE_TIMEOUT in this case.
 *
 * Returns the timeout remaining
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

	sched_timeout = sched->work_tdr.timer.expires;

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive.
	 */
	if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);

/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
		unsigned long remaining)
{
	spin_lock(&sched->job_list_lock);

	if (list_empty(&sched->pending_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(system_wq, &sched->work_tdr, remaining);

	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
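
/*
 * A hedged usage sketch (not part of this file; the "my_" names are
 * hypothetical): a driver that temporarily takes the hardware away from the
 * scheduler, e.g. for preemption or power management, can pause the job
 * timeout around that window so the still-pending jobs are not flagged as
 * hung.
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(&my_ring->sched);
 *	my_do_preempt(my_ring);
 *	drm_sched_resume_timeout(&my_ring->sched, remaining);
 */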

static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->list, &sched->pending_list);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job) {
		/*
		 * Remove the bad job so it cannot be freed by a concurrent
		 * drm_sched_get_cleanup_job(). It will be reinserted after
		 * sched->thread is parked, at which point it is safe.
		 */
		list_del_init(&job->list);
		spin_unlock(&sched->job_list_lock);

		job->sched->ops->timedout_job(job);

		/*
		 * Guilty job did complete and hence needs to be manually removed.
		 * See drm_sched_stop doc.
		 */
		if (sched->free_guilty) {
			job->sched->ops->free_job(job);
			sched->free_guilty = false;
		}
	} else {
		spin_unlock(&sched->job_list_lock);
	}

	spin_lock(&sched->job_list_lock);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty and
 * jobs from it will not be scheduled further
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* Don't increase @bad's karma if it's from the KERNEL run queue,
	 * because a GPU hang can sometimes corrupt kernel jobs (like VM
	 * updating jobs), but kernel jobs are always considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);
		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
		     i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (atomic_read(&bad->karma) >
					    bad->sched->hang_limit)
						if (entity->guilty)
							atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma);

/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: job which caused the time out
 *
 * Stop the scheduler and also remove and free all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it is the
 * caller's responsibility to release it manually if it is not part of the
 * pending list any more.
 *
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;

	kthread_park(sched->thread);

	/*
	 * Reinsert back the bad job here - now it's safe as
	 * drm_sched_get_cleanup_job cannot race against us and release the
	 * bad job at this point - we parked (waited for) any in progress
	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
	 * now until the scheduler thread is unparked.
	 */
	if (bad && bad->sched == sched)
		/*
		 * Add at the head of the queue to reflect it was the earliest
		 * job extracted.
		 */
		list_add(&bad->list, &sched->pending_list);

	/*
	 * Iterate the job list from later to earlier one and either deactivate
	 * their HW callbacks or remove them from the pending list if they have
	 * already signaled.
	 * This iteration is thread safe as the sched thread is stopped.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
					 list) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			atomic_dec(&sched->hw_rq_count);
		} else {
			/*
			 * Remove the job from the pending_list.
			 * Locking here is for a concurrent resume timeout.
			 */
			spin_lock(&sched->job_list_lock);
			list_del_init(&s_job->list);
			spin_unlock(&sched->job_list_lock);

			/*
			 * Wait for the job's HW fence callback to finish using
			 * s_job before releasing it.
			 *
			 * The job is still alive, so the fence refcount is at
			 * least 1.
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep the bad job alive for later use during
			 * recovery by some of the drivers but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop the pending timer in flight as we rearm it in drm_sched_start.
	 * This prevents the pending timeout work in progress from firing right
	 * away after this TDR finished and before the newly restarted jobs had
	 * a chance to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}
EXPORT_SYMBOL(drm_sched_stop);

/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 *
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	int r;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being inserted or removed. Also concurrent
	 * GPU recovers can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct dma_fence *fence = s_job->s_fence->parent;

		atomic_inc(&sched->hw_rq_count);

		if (!full_recovery)
			continue;

		if (fence) {
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(s_job);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
		} else
			drm_sched_job_done(s_job);
	}

	if (full_recovery) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}

	kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);

/**
 * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list
 *
 * @sched: scheduler instance
 *
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;
	struct dma_fence *fence;

	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		dma_fence_put(s_job->s_fence->parent);
		fence = sched->ops->run_job(s_job);

		if (IS_ERR_OR_NULL(fence)) {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			s_job->s_fence->parent = NULL;
		} else {
			s_job->s_fence->parent = fence;
		}
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
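
/*
 * A hedged sketch of the usual recovery sequence in a driver's
 * &drm_sched_backend_ops.timedout_job callback (not part of this file; the
 * "my_" names are hypothetical and individual drivers differ in detail):
 *
 *	drm_sched_stop(&my_ring->sched, bad_job);
 *	drm_sched_increase_karma(bad_job);
 *	my_hw_reset(my_ring);
 *	drm_sched_resubmit_jobs(&my_ring->sched);
 *	drm_sched_start(&my_ring->sched, true);
 */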

/**
 * drm_sched_job_init - init a scheduler job
 *
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	struct drm_gpu_scheduler *sched;

	drm_sched_entity_select_rq(entity);
	if (!entity->rq)
		return -ENOENT;

	sched = entity->rq->sched;

	job->sched = sched;
	job->entity = entity;
	job->s_priority = entity->rq - sched->sched_rq;
	job->s_fence = drm_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_LIST_HEAD(&job->list);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);
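
/*
 * A hedged submission sketch (not part of this file; the "my_" names are
 * hypothetical, and the two-argument drm_sched_entity_push_job() assumed here
 * may differ between kernel versions): after a successful drm_sched_job_init()
 * the driver resolves its dependencies and hands the job to the entity; if it
 * bails out before pushing, it must call drm_sched_job_cleanup() itself.
 *
 *	r = drm_sched_job_init(&my_job->base, &my_ctx->entity, my_file_priv);
 *	if (r)
 *		return r;
 *
 *	... collect dependencies, publish &my_job->base.s_fence->finished ...
 *
 *	drm_sched_entity_push_job(&my_job->base, &my_ctx->entity);
 */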

/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 *
 * @job: scheduler job to clean up
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	dma_fence_put(&job->s_fence->finished);
	job->s_fence = NULL;
}
EXPORT_SYMBOL(drm_sched_job_cleanup);

/**
 * drm_sched_ready - is the scheduler ready
 *
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * drm_sched_wakeup - Wake up the scheduler when it is ready
 *
 * @sched: scheduler instance
 *
 */
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

/**
 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
 *
 * @sched: scheduler instance
 *
 * Returns the next finished job from the pending list (if there is one)
 * ready for it to be destroyed.
 */
static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job;

	/*
	 * Don't destroy jobs while the timeout worker is running OR the thread
	 * is being parked and hence assumed not to touch pending_list
	 */
	if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !cancel_delayed_work(&sched->work_tdr)) ||
	    kthread_should_park())
		return NULL;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
		/* remove job from pending_list */
		list_del_init(&job->list);
	} else {
		job = NULL;
		/* queue timeout for next job */
		drm_sched_start_timeout(sched);
	}

	spin_unlock(&sched->job_list_lock);

	return job;
}

/**
 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
 * @sched_list: list of drm_gpu_schedulers
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
 *
 * Returns a pointer to the sched with the least load, or NULL if none of the
 * drm_gpu_schedulers are ready
 */
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		     unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
	int i;
	unsigned int min_score = UINT_MAX, num_score;

	for (i = 0; i < num_sched_list; ++i) {
		sched = sched_list[i];

		if (!sched->ready) {
			DRM_WARN("scheduler %s is not ready, skipping",
				 sched->name);
			continue;
		}

		num_score = atomic_read(&sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			picked_sched = sched;
		}
	}

	return picked_sched;
}
EXPORT_SYMBOL(drm_sched_pick_best);

/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_set_fifo_low(current);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;
		struct drm_sched_job *cleanup_job = NULL;

		wait_event_interruptible(sched->wake_up_worker,
					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (cleanup_job) {
			sched->ops->free_job(cleanup_job);
			/* queue timeout for next job */
			drm_sched_start_timeout(sched);
		}

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);

		complete(&entity->entity_idle);

		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		trace_drm_run_job(sched_job, entity);
		fence = sched->ops->run_job(sched_job);
		drm_sched_fence_scheduled(s_fence);

		if (!IS_ERR_OR_NULL(fence)) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &sched_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(sched_job);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			drm_sched_job_done(sched_job);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @name: name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission,
		   unsigned hang_limit,
		   long timeout,
		   const char *name)
{
	int i, ret;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->hang_limit = hang_limit;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->pending_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->score, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return ret;
	}

	sched->ready = true;
	return 0;
}
EXPORT_SYMBOL(drm_sched_init);

/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);

	/* Confirm no work left behind accessing device structures */
	cancel_delayed_work_sync(&sched->work_tdr);

	sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);
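
/*
 * A hedged teardown sketch (not part of this file; the "my_" names are
 * hypothetical): entities are destroyed before the scheduler they push jobs
 * to is torn down.
 *
 *	drm_sched_entity_destroy(&my_ctx->entity);
 *	drm_sched_fini(&my_ring->sched);
 */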