1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 /**
25  * DOC: Overview
26  *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them, and the scheduler picks
 * entities from a run queue in a round-robin fashion (see
 * drm_sched_rq_select_entity()). The scheduler also provides dependency
 * handling between jobs. The driver is expected to provide callbacks for
 * the backend operations, such as submitting a job to the hardware run
 * queue and returning the dependencies of a job.
34  *
35  * The organisation of the scheduler is the following:
36  *
37  * 1. Each hw run queue has one scheduler
38  * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
40  * 3. Each scheduler run queue has a queue of entities to schedule
41  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
42  *    the hardware.
43  *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed. A minimal driver-side sketch of this flow follows below.
45  */
46 
47 #include <linux/kthread.h>
48 #include <linux/wait.h>
49 #include <linux/sched.h>
50 #include <linux/completion.h>
51 #include <uapi/linux/sched/types.h>
52 
53 #include <drm/drm_print.h>
54 #include <drm/gpu_scheduler.h>
55 #include <drm/spsc_queue.h>
56 
57 #define CREATE_TRACE_POINTS
58 #include "gpu_scheduler_trace.h"
59 
60 #define to_drm_sched_job(sched_job)		\
61 		container_of((sched_job), struct drm_sched_job, queue_node)
62 
63 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
64 
65 /**
66  * drm_sched_rq_init - initialize a given run queue struct
67  *
68  * @sched: scheduler instance to associate with this run queue
69  * @rq: scheduler run queue
70  *
71  * Initializes a scheduler runqueue.
72  */
73 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
74 			      struct drm_sched_rq *rq)
75 {
76 	spin_lock_init(&rq->lock);
77 	INIT_LIST_HEAD(&rq->entities);
78 	rq->current_entity = NULL;
79 	rq->sched = sched;
80 }
81 
82 /**
83  * drm_sched_rq_add_entity - add an entity
84  *
85  * @rq: scheduler run queue
86  * @entity: scheduler entity
87  *
88  * Adds a scheduler entity to the run queue.
89  */
90 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
91 			     struct drm_sched_entity *entity)
92 {
93 	if (!list_empty(&entity->list))
94 		return;
95 	spin_lock(&rq->lock);
96 	atomic_inc(&rq->sched->score);
97 	list_add_tail(&entity->list, &rq->entities);
98 	spin_unlock(&rq->lock);
99 }
100 
101 /**
102  * drm_sched_rq_remove_entity - remove an entity
103  *
104  * @rq: scheduler run queue
105  * @entity: scheduler entity
106  *
107  * Removes a scheduler entity from the run queue.
108  */
109 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
110 				struct drm_sched_entity *entity)
111 {
112 	if (list_empty(&entity->list))
113 		return;
114 	spin_lock(&rq->lock);
115 	atomic_dec(&rq->sched->score);
116 	list_del_init(&entity->list);
117 	if (rq->current_entity == entity)
118 		rq->current_entity = NULL;
119 	spin_unlock(&rq->lock);
120 }
121 
122 /**
123  * drm_sched_rq_select_entity - Select an entity which could provide a job to run
124  *
125  * @rq: scheduler run queue to check.
126  *
127  * Try to find a ready entity, returns NULL if none found.
128  */
129 static struct drm_sched_entity *
130 drm_sched_rq_select_entity(struct drm_sched_rq *rq)
131 {
132 	struct drm_sched_entity *entity;
133 
134 	spin_lock(&rq->lock);
135 
136 	entity = rq->current_entity;
137 	if (entity) {
138 		list_for_each_entry_continue(entity, &rq->entities, list) {
139 			if (drm_sched_entity_is_ready(entity)) {
140 				rq->current_entity = entity;
141 				reinit_completion(&entity->entity_idle);
142 				spin_unlock(&rq->lock);
143 				return entity;
144 			}
145 		}
146 	}
147 
	list_for_each_entry(entity, &rq->entities, list) {
		if (drm_sched_entity_is_ready(entity)) {
151 			rq->current_entity = entity;
152 			reinit_completion(&entity->entity_idle);
153 			spin_unlock(&rq->lock);
154 			return entity;
155 		}
156 
157 		if (entity == rq->current_entity)
158 			break;
159 	}
160 
161 	spin_unlock(&rq->lock);
162 
163 	return NULL;
164 }
165 
166 /**
 * drm_sched_dependency_optimized - check if the dependency can be optimized
168  *
169  * @fence: the dependency fence
170  * @entity: the entity which depends on the above fence
171  *
 * Returns true if the dependency can be optimized and false otherwise.
173  */
bool drm_sched_dependency_optimized(struct dma_fence *fence,
175 				    struct drm_sched_entity *entity)
176 {
177 	struct drm_gpu_scheduler *sched = entity->rq->sched;
178 	struct drm_sched_fence *s_fence;
179 
180 	if (!fence || dma_fence_is_signaled(fence))
181 		return false;
182 	if (fence->context == entity->fence_context)
183 		return true;
184 	s_fence = to_drm_sched_fence(fence);
185 	if (s_fence && s_fence->sched == sched)
186 		return true;
187 
188 	return false;
189 }
190 EXPORT_SYMBOL(drm_sched_dependency_optimized);
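
/*
 * A sketch of how a driver might use this helper to skip a redundant
 * synchronization before emitting a job that depends on @dep_fence (the
 * my_emit_pipeline_sync() helper is hypothetical):
 *
 *	if (!drm_sched_dependency_optimized(dep_fence, &ctx->entity))
 *		my_emit_pipeline_sync(ring, dep_fence);
 */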
191 
192 /**
193  * drm_sched_start_timeout - start timeout for reset worker
194  *
195  * @sched: scheduler instance to start the worker for
196  *
197  * Start the timeout for the given scheduler.
198  */
199 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
200 {
201 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
202 	    !list_empty(&sched->ring_mirror_list))
203 		schedule_delayed_work(&sched->work_tdr, sched->timeout);
204 }
205 
206 /**
207  * drm_sched_fault - immediately start timeout handler
208  *
209  * @sched: scheduler where the timeout handling should be started.
210  *
211  * Start timeout handling immediately when the driver detects a hardware fault.
212  */
213 void drm_sched_fault(struct drm_gpu_scheduler *sched)
214 {
215 	mod_delayed_work(system_wq, &sched->work_tdr, 0);
216 }
217 EXPORT_SYMBOL(drm_sched_fault);
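
/*
 * A sketch of the intended use of drm_sched_fault(): the driver's fault
 * interrupt handler kicks the timeout handler immediately instead of waiting
 * for the timeout to elapse (my_fault_irq() and struct my_ring are
 * hypothetical):
 *
 *	static irqreturn_t my_fault_irq(int irq, void *arg)
 *	{
 *		struct my_ring *ring = arg;
 *
 *		drm_sched_fault(&ring->sched);
 *		return IRQ_HANDLED;
 *	}
 */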
218 
219 /**
220  * drm_sched_suspend_timeout - Suspend scheduler job timeout
221  *
222  * @sched: scheduler instance for which to suspend the timeout
223  *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
 * MAX_SCHEDULE_TIMEOUT in this case.
 *
 * Returns the remaining timeout in jiffies.
230  */
231 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
232 {
233 	unsigned long sched_timeout, now = jiffies;
234 
235 	sched_timeout = sched->work_tdr.timer.expires;
236 
237 	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive.
240 	 */
241 	if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
242 			&& time_after(sched_timeout, now))
243 		return sched_timeout - now;
244 	else
245 		return sched->timeout;
246 }
247 EXPORT_SYMBOL(drm_sched_suspend_timeout);
248 
249 /**
250  * drm_sched_resume_timeout - Resume scheduler job timeout
251  *
252  * @sched: scheduler instance for which to resume the timeout
253  * @remaining: remaining timeout
254  *
255  * Resume the delayed work timeout for the scheduler.
256  */
257 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
258 		unsigned long remaining)
259 {
260 	spin_lock(&sched->job_list_lock);
261 
262 	if (list_empty(&sched->ring_mirror_list))
263 		cancel_delayed_work(&sched->work_tdr);
264 	else
265 		mod_delayed_work(system_wq, &sched->work_tdr, remaining);
266 
267 	spin_unlock(&sched->job_list_lock);
268 }
269 EXPORT_SYMBOL(drm_sched_resume_timeout);
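
/*
 * drm_sched_suspend_timeout() and drm_sched_resume_timeout() are meant to be
 * used as a pair around a window in which jobs are intentionally stalled,
 * for example while the driver preempts a ring for a test (the
 * my_preempt_and_restore() helper is hypothetical):
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(&ring->sched);
 *	my_preempt_and_restore(ring);
 *	drm_sched_resume_timeout(&ring->sched, remaining);
 */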
270 
271 static void drm_sched_job_begin(struct drm_sched_job *s_job)
272 {
273 	struct drm_gpu_scheduler *sched = s_job->sched;
274 
275 	spin_lock(&sched->job_list_lock);
276 	list_add_tail(&s_job->node, &sched->ring_mirror_list);
277 	drm_sched_start_timeout(sched);
278 	spin_unlock(&sched->job_list_lock);
279 }
280 
281 static void drm_sched_job_timedout(struct work_struct *work)
282 {
283 	struct drm_gpu_scheduler *sched;
284 	struct drm_sched_job *job;
285 
286 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
287 
288 	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
289 	spin_lock(&sched->job_list_lock);
290 	job = list_first_entry_or_null(&sched->ring_mirror_list,
291 				       struct drm_sched_job, node);
292 
293 	if (job) {
294 		/*
295 		 * Remove the bad job so it cannot be freed by concurrent
296 		 * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
297 		 * is parked at which point it's safe.
298 		 */
299 		list_del_init(&job->node);
300 		spin_unlock(&sched->job_list_lock);
301 
302 		job->sched->ops->timedout_job(job);
303 
304 		/*
305 		 * Guilty job did complete and hence needs to be manually removed
306 		 * See drm_sched_stop doc.
307 		 */
308 		if (sched->free_guilty) {
309 			job->sched->ops->free_job(job);
310 			sched->free_guilty = false;
311 		}
312 	} else {
313 		spin_unlock(&sched->job_list_lock);
314 	}
315 
316 	spin_lock(&sched->job_list_lock);
317 	drm_sched_start_timeout(sched);
318 	spin_unlock(&sched->job_list_lock);
319 }
320 
321  /**
322   * drm_sched_increase_karma - Update sched_entity guilty flag
323   *
324   * @bad: The job guilty of time out
325   *
326   * Increment on every hang caused by the 'bad' job. If this exceeds the hang
327   * limit of the scheduler then the respective sched entity is marked guilty and
328   * jobs from it will not be scheduled further
329   */
330 void drm_sched_increase_karma(struct drm_sched_job *bad)
331 {
332 	int i;
333 	struct drm_sched_entity *tmp;
334 	struct drm_sched_entity *entity;
335 	struct drm_gpu_scheduler *sched = bad->sched;
336 
	/* Don't increase @bad's karma if it's from the KERNEL RQ, because a
	 * GPU hang can also corrupt kernel jobs (like VM updating jobs), but
	 * kernel jobs are always considered good.
	 */
341 	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
342 		atomic_inc(&bad->karma);
343 		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
344 		     i++) {
345 			struct drm_sched_rq *rq = &sched->sched_rq[i];
346 
347 			spin_lock(&rq->lock);
348 			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
349 				if (bad->s_fence->scheduled.context ==
350 				    entity->fence_context) {
351 					if (atomic_read(&bad->karma) >
352 					    bad->sched->hang_limit)
353 						if (entity->guilty)
354 							atomic_set(entity->guilty, 1);
355 					break;
356 				}
357 			}
358 			spin_unlock(&rq->lock);
359 			if (&entity->list != &rq->entities)
360 				break;
361 		}
362 	}
363 }
364 EXPORT_SYMBOL(drm_sched_increase_karma);
365 
366 /**
367  * drm_sched_stop - stop the scheduler
368  *
369  * @sched: scheduler instance
370  * @bad: job which caused the time out
371  *
 * Stop the scheduler, and also remove and free all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it is
 * the caller's responsibility to release it manually if it is no longer
 * part of the mirror list.
377  */
378 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
379 {
380 	struct drm_sched_job *s_job, *tmp;
381 
382 	kthread_park(sched->thread);
383 
384 	/*
385 	 * Reinsert back the bad job here - now it's safe as
386 	 * drm_sched_get_cleanup_job cannot race against us and release the
387 	 * bad job at this point - we parked (waited for) any in progress
388 	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
389 	 * now until the scheduler thread is unparked.
390 	 */
391 	if (bad && bad->sched == sched)
392 		/*
393 		 * Add at the head of the queue to reflect it was the earliest
394 		 * job extracted.
395 		 */
396 		list_add(&bad->node, &sched->ring_mirror_list);
397 
398 	/*
399 	 * Iterate the job list from later to  earlier one and either deactive
400 	 * their HW callbacks or remove them from mirror list if they already
401 	 * signaled.
402 	 * This iteration is thread safe as sched thread is stopped.
403 	 */
404 	list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
405 		if (s_job->s_fence->parent &&
406 		    dma_fence_remove_callback(s_job->s_fence->parent,
407 					      &s_job->cb)) {
408 			atomic_dec(&sched->hw_rq_count);
409 		} else {
410 			/*
411 			 * remove job from ring_mirror_list.
412 			 * Locking here is for concurrent resume timeout
413 			 */
414 			spin_lock(&sched->job_list_lock);
415 			list_del_init(&s_job->node);
416 			spin_unlock(&sched->job_list_lock);
417 
418 			/*
419 			 * Wait for job's HW fence callback to finish using s_job
420 			 * before releasing it.
421 			 *
422 			 * Job is still alive so fence refcount at least 1
423 			 */
424 			dma_fence_wait(&s_job->s_fence->finished, false);
425 
426 			/*
427 			 * We must keep bad job alive for later use during
428 			 * recovery by some of the drivers but leave a hint
429 			 * that the guilty job must be released.
430 			 */
431 			if (bad != s_job)
432 				sched->ops->free_job(s_job);
433 			else
434 				sched->free_guilty = true;
435 		}
436 	}
437 
438 	/*
439 	 * Stop pending timer in flight as we rearm it in  drm_sched_start. This
440 	 * avoids the pending timeout work in progress to fire right away after
441 	 * this TDR finished and before the newly restarted jobs had a
442 	 * chance to complete.
443 	 */
444 	cancel_delayed_work(&sched->work_tdr);
445 }
446 
447 EXPORT_SYMBOL(drm_sched_stop);
448 
449 /**
450  * drm_sched_start - recover jobs after a reset
451  *
452  * @sched: scheduler instance
453  * @full_recovery: proceed with complete sched restart
454  *
455  */
456 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
457 {
458 	struct drm_sched_job *s_job, *tmp;
459 	int r;
460 
461 	/*
462 	 * Locking the list is not required here as the sched thread is parked
463 	 * so no new jobs are being inserted or removed. Also concurrent
464 	 * GPU recovers can't run in parallel.
465 	 */
466 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
467 		struct dma_fence *fence = s_job->s_fence->parent;
468 
469 		atomic_inc(&sched->hw_rq_count);
470 
471 		if (!full_recovery)
472 			continue;
473 
474 		if (fence) {
475 			r = dma_fence_add_callback(fence, &s_job->cb,
476 						   drm_sched_process_job);
477 			if (r == -ENOENT)
478 				drm_sched_process_job(fence, &s_job->cb);
479 			else if (r)
480 				DRM_ERROR("fence add callback failed (%d)\n",
481 					  r);
		} else {
			drm_sched_process_job(NULL, &s_job->cb);
		}
484 	}
485 
486 	if (full_recovery) {
487 		spin_lock(&sched->job_list_lock);
488 		drm_sched_start_timeout(sched);
489 		spin_unlock(&sched->job_list_lock);
490 	}
491 
492 	kthread_unpark(sched->thread);
493 }
494 EXPORT_SYMBOL(drm_sched_start);
495 
496 /**
497  * drm_sched_resubmit_jobs - helper to relunch job from mirror ring list
498  *
499  * @sched: scheduler instance
500  *
501  */
502 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
503 {
504 	struct drm_sched_job *s_job, *tmp;
505 	uint64_t guilty_context;
506 	bool found_guilty = false;
507 	struct dma_fence *fence;
508 
509 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
510 		struct drm_sched_fence *s_fence = s_job->s_fence;
511 
512 		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
513 			found_guilty = true;
514 			guilty_context = s_job->s_fence->scheduled.context;
515 		}
516 
517 		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
518 			dma_fence_set_error(&s_fence->finished, -ECANCELED);
519 
520 		dma_fence_put(s_job->s_fence->parent);
521 		fence = sched->ops->run_job(s_job);
522 
523 		if (IS_ERR_OR_NULL(fence)) {
524 			if (IS_ERR(fence))
525 				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
526 
527 			s_job->s_fence->parent = NULL;
		} else {
			s_job->s_fence->parent = fence;
		}
	}
534 }
535 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
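
/*
 * Taken together, drm_sched_stop(), drm_sched_increase_karma(),
 * drm_sched_resubmit_jobs() and drm_sched_start() form the typical timeout
 * recovery sequence a driver runs from its timedout_job callback. A sketch
 * assuming a single scheduler and a hypothetical my_hw_reset() helper:
 *
 *	static void my_timedout_job(struct drm_sched_job *bad)
 *	{
 *		struct drm_gpu_scheduler *sched = bad->sched;
 *
 *		drm_sched_stop(sched, bad);
 *		drm_sched_increase_karma(bad);
 *		my_hw_reset(sched);
 *		drm_sched_resubmit_jobs(sched);
 *		drm_sched_start(sched, true);
 *	}
 */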
536 
537 /**
538  * drm_sched_job_init - init a scheduler job
539  *
540  * @job: scheduler job to init
541  * @entity: scheduler entity to use
542  * @owner: job owner for debugging
543  *
544  * Refer to drm_sched_entity_push_job() documentation
545  * for locking considerations.
546  *
547  * Returns 0 for success, negative error code otherwise.
548  */
549 int drm_sched_job_init(struct drm_sched_job *job,
550 		       struct drm_sched_entity *entity,
551 		       void *owner)
552 {
553 	struct drm_gpu_scheduler *sched;
554 
555 	drm_sched_entity_select_rq(entity);
556 	if (!entity->rq)
557 		return -ENOENT;
558 
559 	sched = entity->rq->sched;
560 
561 	job->sched = sched;
562 	job->entity = entity;
563 	job->s_priority = entity->rq - sched->sched_rq;
564 	job->s_fence = drm_sched_fence_create(entity, owner);
565 	if (!job->s_fence)
566 		return -ENOMEM;
567 	job->id = atomic64_inc_return(&sched->job_id_count);
568 
569 	INIT_LIST_HEAD(&job->node);
570 
571 	return 0;
572 }
573 EXPORT_SYMBOL(drm_sched_job_init);
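
/*
 * A sketch of the expected submission pattern built on drm_sched_job_init()
 * (my_job and my_prepare_job() are hypothetical). Once the job has been
 * pushed, the scheduler owns it and will free it via the free_job callback:
 *
 *	r = drm_sched_job_init(&my_job->base, &ctx->entity, owner);
 *	if (r)
 *		return r;
 *
 *	r = my_prepare_job(my_job);
 *	if (r) {
 *		drm_sched_job_cleanup(&my_job->base);
 *		return r;
 *	}
 *
 *	drm_sched_entity_push_job(&my_job->base, &ctx->entity);
 */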
574 
575 /**
576  * drm_sched_job_cleanup - clean up scheduler job resources
577  *
578  * @job: scheduler job to clean up
579  */
580 void drm_sched_job_cleanup(struct drm_sched_job *job)
581 {
582 	dma_fence_put(&job->s_fence->finished);
583 	job->s_fence = NULL;
584 }
585 EXPORT_SYMBOL(drm_sched_job_cleanup);
586 
587 /**
588  * drm_sched_ready - is the scheduler ready
589  *
590  * @sched: scheduler instance
591  *
592  * Return true if we can push more jobs to the hw, otherwise false.
593  */
594 static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
595 {
596 	return atomic_read(&sched->hw_rq_count) <
597 		sched->hw_submission_limit;
598 }
599 
600 /**
601  * drm_sched_wakeup - Wake up the scheduler when it is ready
602  *
603  * @sched: scheduler instance
604  *
605  */
606 void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
607 {
608 	if (drm_sched_ready(sched))
609 		wake_up_interruptible(&sched->wake_up_worker);
610 }
611 
612 /**
613  * drm_sched_select_entity - Select next entity to process
614  *
615  * @sched: scheduler instance
616  *
617  * Returns the entity to process or NULL if none are found.
618  */
619 static struct drm_sched_entity *
620 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
621 {
622 	struct drm_sched_entity *entity;
623 	int i;
624 
625 	if (!drm_sched_ready(sched))
626 		return NULL;
627 
	/* Kernel run queue has higher priority than normal run queue */
629 	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
630 		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
631 		if (entity)
632 			break;
633 	}
634 
635 	return entity;
636 }
637 
638 /**
639  * drm_sched_process_job - process a job
640  *
641  * @f: fence
 * @cb: fence callback
 *
 * Called after the job has finished execution.
645  */
646 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
647 {
648 	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
649 	struct drm_sched_fence *s_fence = s_job->s_fence;
650 	struct drm_gpu_scheduler *sched = s_fence->sched;
651 
652 	atomic_dec(&sched->hw_rq_count);
653 	atomic_dec(&sched->score);
654 
655 	trace_drm_sched_process_job(s_fence);
656 
657 	dma_fence_get(&s_fence->finished);
658 	drm_sched_fence_finished(s_fence);
659 	dma_fence_put(&s_fence->finished);
660 	wake_up_interruptible(&sched->wake_up_worker);
661 }
662 
663 /**
664  * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
665  *
666  * @sched: scheduler instance
667  *
 * Returns the next finished job from the mirror list (if there is one)
 * ready to be destroyed.
670  */
671 static struct drm_sched_job *
672 drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
673 {
674 	struct drm_sched_job *job;
675 
676 	/*
677 	 * Don't destroy jobs while the timeout worker is running  OR thread
678 	 * is being parked and hence assumed to not touch ring_mirror_list
679 	 */
680 	if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
681 	    !cancel_delayed_work(&sched->work_tdr)) ||
682 	    kthread_should_park())
683 		return NULL;
684 
685 	spin_lock(&sched->job_list_lock);
686 
687 	job = list_first_entry_or_null(&sched->ring_mirror_list,
688 				       struct drm_sched_job, node);
689 
690 	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
691 		/* remove job from ring_mirror_list */
692 		list_del_init(&job->node);
693 	} else {
694 		job = NULL;
695 		/* queue timeout for next job */
696 		drm_sched_start_timeout(sched);
697 	}
698 
699 	spin_unlock(&sched->job_list_lock);
700 
701 	return job;
702 }
703 
704 /**
705  * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
706  * @sched_list: list of drm_gpu_schedulers
707  * @num_sched_list: number of drm_gpu_schedulers in the sched_list
708  *
 * Returns a pointer to the scheduler with the least load, or NULL if none of
 * the drm_gpu_schedulers are ready.
711  */
712 struct drm_gpu_scheduler *
713 drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
714 		     unsigned int num_sched_list)
715 {
716 	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
	unsigned int i;
	unsigned int min_score = UINT_MAX, num_score;
719 
720 	for (i = 0; i < num_sched_list; ++i) {
721 		sched = sched_list[i];
722 
723 		if (!sched->ready) {
724 			DRM_WARN("scheduler %s is not ready, skipping",
725 				 sched->name);
726 			continue;
727 		}
728 
729 		num_score = atomic_read(&sched->score);
730 		if (num_score < min_score) {
731 			min_score = num_score;
732 			picked_sched = sched;
733 		}
734 	}
735 
736 	return picked_sched;
737 }
738 EXPORT_SYMBOL(drm_sched_pick_best);
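
/*
 * A sketch of using drm_sched_pick_best() to spread load over a set of
 * schedulers, mirroring what drm_sched_entity_select_rq() does internally
 * (sched_list, num_scheds and priority are assumed to be provided by the
 * caller):
 *
 *	struct drm_gpu_scheduler *sched;
 *	struct drm_sched_rq *rq = NULL;
 *
 *	sched = drm_sched_pick_best(sched_list, num_scheds);
 *	if (sched)
 *		rq = &sched->sched_rq[priority];
 */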
739 
740 /**
741  * drm_sched_blocked - check if the scheduler is blocked
742  *
743  * @sched: scheduler instance
744  *
745  * Returns true if blocked, otherwise false.
746  */
747 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
748 {
749 	if (kthread_should_park()) {
750 		kthread_parkme();
751 		return true;
752 	}
753 
754 	return false;
755 }
756 
757 /**
758  * drm_sched_main - main scheduler thread
759  *
760  * @param: scheduler instance
761  *
762  * Returns 0.
763  */
764 static int drm_sched_main(void *param)
765 {
766 	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
767 	int r;
768 
769 	sched_set_fifo_low(current);
770 
771 	while (!kthread_should_stop()) {
772 		struct drm_sched_entity *entity = NULL;
773 		struct drm_sched_fence *s_fence;
774 		struct drm_sched_job *sched_job;
775 		struct dma_fence *fence;
776 		struct drm_sched_job *cleanup_job = NULL;
777 
778 		wait_event_interruptible(sched->wake_up_worker,
779 					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
780 					 (!drm_sched_blocked(sched) &&
781 					  (entity = drm_sched_select_entity(sched))) ||
782 					 kthread_should_stop());
783 
784 		if (cleanup_job) {
785 			sched->ops->free_job(cleanup_job);
786 			/* queue timeout for next job */
787 			drm_sched_start_timeout(sched);
788 		}
789 
790 		if (!entity)
791 			continue;
792 
793 		sched_job = drm_sched_entity_pop_job(entity);
794 
795 		complete(&entity->entity_idle);
796 
797 		if (!sched_job)
798 			continue;
799 
800 		s_fence = sched_job->s_fence;
801 
802 		atomic_inc(&sched->hw_rq_count);
803 		drm_sched_job_begin(sched_job);
804 
805 		trace_drm_run_job(sched_job, entity);
806 		fence = sched->ops->run_job(sched_job);
807 		drm_sched_fence_scheduled(s_fence);
808 
809 		if (!IS_ERR_OR_NULL(fence)) {
810 			s_fence->parent = dma_fence_get(fence);
811 			r = dma_fence_add_callback(fence, &sched_job->cb,
812 						   drm_sched_process_job);
813 			if (r == -ENOENT)
814 				drm_sched_process_job(fence, &sched_job->cb);
815 			else if (r)
816 				DRM_ERROR("fence add callback failed (%d)\n",
817 					  r);
818 			dma_fence_put(fence);
819 		} else {
820 			if (IS_ERR(fence))
821 				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
822 
823 			drm_sched_process_job(NULL, &sched_job->cb);
824 		}
825 
826 		wake_up(&sched->job_scheduled);
827 	}
828 	return 0;
829 }
830 
831 /**
832  * drm_sched_init - Init a gpu scheduler instance
833  *
834  * @sched: scheduler instance
835  * @ops: backend operations for this scheduler
836  * @hw_submission: number of hw submissions that can be in flight
837  * @hang_limit: number of times to allow a job to hang before dropping it
838  * @timeout: timeout value in jiffies for the scheduler
839  * @name: name used for debugging
840  *
 * Returns 0 on success, otherwise an error code.
842  */
843 int drm_sched_init(struct drm_gpu_scheduler *sched,
844 		   const struct drm_sched_backend_ops *ops,
845 		   unsigned hw_submission,
846 		   unsigned hang_limit,
847 		   long timeout,
848 		   const char *name)
849 {
	int i, ret;

	sched->ops = ops;
852 	sched->hw_submission_limit = hw_submission;
853 	sched->name = name;
854 	sched->timeout = timeout;
855 	sched->hang_limit = hang_limit;
856 	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
857 		drm_sched_rq_init(sched, &sched->sched_rq[i]);
858 
859 	init_waitqueue_head(&sched->wake_up_worker);
860 	init_waitqueue_head(&sched->job_scheduled);
861 	INIT_LIST_HEAD(&sched->ring_mirror_list);
862 	spin_lock_init(&sched->job_list_lock);
863 	atomic_set(&sched->hw_rq_count, 0);
864 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
865 	atomic_set(&sched->score, 0);
866 	atomic64_set(&sched->job_id_count, 0);
867 
	/* Each scheduler will run on a separate kernel thread */
869 	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
870 	if (IS_ERR(sched->thread)) {
871 		ret = PTR_ERR(sched->thread);
872 		sched->thread = NULL;
873 		DRM_ERROR("Failed to create scheduler for %s.\n", name);
874 		return ret;
875 	}
876 
877 	sched->ready = true;
878 	return 0;
879 }
880 EXPORT_SYMBOL(drm_sched_init);
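
/*
 * A sketch of a typical init/teardown pairing (struct my_ring and my_ops are
 * hypothetical): 64 jobs may be in flight on the hardware, one hang is
 * tolerated before an entity is marked guilty, and jobs time out after one
 * second:
 *
 *	r = drm_sched_init(&ring->sched, &my_ops, 64, 1,
 *			   msecs_to_jiffies(1000), ring->name);
 *	if (r)
 *		return r;
 *
 * and on teardown:
 *
 *	drm_sched_fini(&ring->sched);
 */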
881 
882 /**
883  * drm_sched_fini - Destroy a gpu scheduler
884  *
885  * @sched: scheduler instance
886  *
887  * Tears down and cleans up the scheduler.
888  */
889 void drm_sched_fini(struct drm_gpu_scheduler *sched)
890 {
891 	if (sched->thread)
892 		kthread_stop(sched->thread);
893 
894 	sched->ready = false;
895 }
896 EXPORT_SYMBOL(drm_sched_fini);
897