1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 /**
25  * DOC: Overview
26  *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler picks the
 * entities from a run queue in a round-robin fashion. The scheduler provides
 * dependency handling among jobs. The driver is expected to provide callback
 * functions to the scheduler for backend operations, like submitting a job to
 * the hardware run queue, returning the dependencies of a job, etc.
34  *
35  * The organisation of the scheduler is the following:
36  *
37  * 1. Each hw run queue has one scheduler
38  * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
40  * 3. Each scheduler run queue has a queue of entities to schedule
41  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
42  *    the hardware.
43  *
 * The jobs in an entity are always scheduled in the order in which they were pushed.
45  */
46 
47 #include <linux/kthread.h>
48 #include <linux/wait.h>
49 #include <linux/sched.h>
50 #include <uapi/linux/sched/types.h>
51 
52 #include <drm/drm_print.h>
53 #include <drm/gpu_scheduler.h>
54 #include <drm/spsc_queue.h>
55 
56 #define CREATE_TRACE_POINTS
57 #include "gpu_scheduler_trace.h"
58 
59 #define to_drm_sched_job(sched_job)		\
60 		container_of((sched_job), struct drm_sched_job, queue_node)
61 
62 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
63 
64 /**
65  * drm_sched_rq_init - initialize a given run queue struct
66  *
67  * @rq: scheduler run queue
68  *
69  * Initializes a scheduler runqueue.
70  */
71 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
72 			      struct drm_sched_rq *rq)
73 {
74 	spin_lock_init(&rq->lock);
75 	INIT_LIST_HEAD(&rq->entities);
76 	rq->current_entity = NULL;
77 	rq->sched = sched;
78 }
79 
80 /**
81  * drm_sched_rq_add_entity - add an entity
82  *
83  * @rq: scheduler run queue
84  * @entity: scheduler entity
85  *
86  * Adds a scheduler entity to the run queue.
87  */
88 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
89 			     struct drm_sched_entity *entity)
90 {
91 	if (!list_empty(&entity->list))
92 		return;
93 	spin_lock(&rq->lock);
94 	list_add_tail(&entity->list, &rq->entities);
95 	spin_unlock(&rq->lock);
96 }
97 
98 /**
99  * drm_sched_rq_remove_entity - remove an entity
100  *
101  * @rq: scheduler run queue
102  * @entity: scheduler entity
103  *
104  * Removes a scheduler entity from the run queue.
105  */
106 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
107 				struct drm_sched_entity *entity)
108 {
109 	if (list_empty(&entity->list))
110 		return;
111 	spin_lock(&rq->lock);
112 	list_del_init(&entity->list);
113 	if (rq->current_entity == entity)
114 		rq->current_entity = NULL;
115 	spin_unlock(&rq->lock);
116 }
117 
118 /**
119  * drm_sched_rq_select_entity - Select an entity which could provide a job to run
120  *
121  * @rq: scheduler run queue to check.
122  *
123  * Try to find a ready entity, returns NULL if none found.
124  */
125 static struct drm_sched_entity *
126 drm_sched_rq_select_entity(struct drm_sched_rq *rq)
127 {
128 	struct drm_sched_entity *entity;
129 
130 	spin_lock(&rq->lock);
131 
132 	entity = rq->current_entity;
133 	if (entity) {
134 		list_for_each_entry_continue(entity, &rq->entities, list) {
135 			if (drm_sched_entity_is_ready(entity)) {
136 				rq->current_entity = entity;
137 				spin_unlock(&rq->lock);
138 				return entity;
139 			}
140 		}
141 	}
142 
143 	list_for_each_entry(entity, &rq->entities, list) {
144 
145 		if (drm_sched_entity_is_ready(entity)) {
146 			rq->current_entity = entity;
147 			spin_unlock(&rq->lock);
148 			return entity;
149 		}
150 
151 		if (entity == rq->current_entity)
152 			break;
153 	}
154 
155 	spin_unlock(&rq->lock);
156 
157 	return NULL;
158 }
159 
160 /**
 * drm_sched_dependency_optimized - check whether the dependency can be optimized
162  *
163  * @fence: the dependency fence
164  * @entity: the entity which depends on the above fence
165  *
166  * Returns true if the dependency can be optimized and false otherwise
167  */
bool drm_sched_dependency_optimized(struct dma_fence *fence,
169 				    struct drm_sched_entity *entity)
170 {
171 	struct drm_gpu_scheduler *sched = entity->rq->sched;
172 	struct drm_sched_fence *s_fence;
173 
174 	if (!fence || dma_fence_is_signaled(fence))
175 		return false;
176 	if (fence->context == entity->fence_context)
177 		return true;
178 	s_fence = to_drm_sched_fence(fence);
179 	if (s_fence && s_fence->sched == sched)
180 		return true;
181 
182 	return false;
183 }
184 EXPORT_SYMBOL(drm_sched_dependency_optimized);
185 
186 /**
187  * drm_sched_start_timeout - start timeout for reset worker
188  *
189  * @sched: scheduler instance to start the worker for
190  *
191  * Start the timeout for the given scheduler.
192  */
193 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
194 {
195 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
196 	    !list_empty(&sched->ring_mirror_list))
197 		schedule_delayed_work(&sched->work_tdr, sched->timeout);
198 }
199 
200 /**
201  * drm_sched_fault - immediately start timeout handler
202  *
203  * @sched: scheduler where the timeout handling should be started.
204  *
205  * Start timeout handling immediately when the driver detects a hardware fault.
206  */
207 void drm_sched_fault(struct drm_gpu_scheduler *sched)
208 {
209 	mod_delayed_work(system_wq, &sched->work_tdr, 0);
210 }
211 EXPORT_SYMBOL(drm_sched_fault);
212 
213 /**
214  * drm_sched_suspend_timeout - Suspend scheduler job timeout
215  *
216  * @sched: scheduler instance for which to suspend the timeout
217  *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
 * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
 * called from an IRQ context.
 *
 * Returns the remaining timeout in jiffies.
225  */
226 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
227 {
228 	unsigned long sched_timeout, now = jiffies;
229 
230 	sched_timeout = sched->work_tdr.timer.expires;
231 
232 	/*
233 	 * Modify the timeout to an arbitrarily large value. This also prevents
234 	 * the timeout to be restarted when new submissions arrive
235 	 */
236 	if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
237 			&& time_after(sched_timeout, now))
238 		return sched_timeout - now;
239 	else
240 		return sched->timeout;
241 }
242 EXPORT_SYMBOL(drm_sched_suspend_timeout);
243 
244 /**
245  * drm_sched_resume_timeout - Resume scheduler job timeout
246  *
247  * @sched: scheduler instance for which to resume the timeout
248  * @remaining: remaining timeout
249  *
250  * Resume the delayed work timeout for the scheduler. Note that
251  * this function can be called from an IRQ context.
252  */
253 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
254 		unsigned long remaining)
255 {
256 	unsigned long flags;
257 
258 	spin_lock_irqsave(&sched->job_list_lock, flags);
259 
260 	if (list_empty(&sched->ring_mirror_list))
261 		cancel_delayed_work(&sched->work_tdr);
262 	else
263 		mod_delayed_work(system_wq, &sched->work_tdr, remaining);
264 
265 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
266 }
267 EXPORT_SYMBOL(drm_sched_resume_timeout);
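
/*
 * A minimal sketch of how the two helpers above are meant to be paired when a
 * driver has to quiesce the hardware without triggering a spurious job
 * timeout; the work done in between is driver specific and only hinted at:
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(sched);
 *
 *	... driver-specific work that may stall the ring ...
 *
 *	drm_sched_resume_timeout(sched, remaining);
 */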
268 
269 static void drm_sched_job_begin(struct drm_sched_job *s_job)
270 {
271 	struct drm_gpu_scheduler *sched = s_job->sched;
272 	unsigned long flags;
273 
274 	spin_lock_irqsave(&sched->job_list_lock, flags);
275 	list_add_tail(&s_job->node, &sched->ring_mirror_list);
276 	drm_sched_start_timeout(sched);
277 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
278 }
279 
280 static void drm_sched_job_timedout(struct work_struct *work)
281 {
282 	struct drm_gpu_scheduler *sched;
283 	struct drm_sched_job *job;
284 	unsigned long flags;
285 
286 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
287 	job = list_first_entry_or_null(&sched->ring_mirror_list,
288 				       struct drm_sched_job, node);
289 
290 	if (job) {
291 		job->sched->ops->timedout_job(job);
292 
293 		/*
294 		 * Guilty job did complete and hence needs to be manually removed
295 		 * See drm_sched_stop doc.
296 		 */
297 		if (sched->free_guilty) {
298 			job->sched->ops->free_job(job);
299 			sched->free_guilty = false;
300 		}
301 	}
302 
303 	spin_lock_irqsave(&sched->job_list_lock, flags);
304 	drm_sched_start_timeout(sched);
305 	spin_unlock_irqrestore(&sched->job_list_lock, flags);
306 }
307 
308  /**
309   * drm_sched_increase_karma - Update sched_entity guilty flag
310   *
311   * @bad: The job guilty of time out
312   *
313   * Increment on every hang caused by the 'bad' job. If this exceeds the hang
314   * limit of the scheduler then the respective sched entity is marked guilty and
315   * jobs from it will not be scheduled further
316   */
317 void drm_sched_increase_karma(struct drm_sched_job *bad)
318 {
319 	int i;
320 	struct drm_sched_entity *tmp;
321 	struct drm_sched_entity *entity;
322 	struct drm_gpu_scheduler *sched = bad->sched;
323 
	/* don't increase @bad's karma if it's from the KERNEL RQ,
	 * because sometimes a GPU hang can corrupt kernel jobs (like VM
	 * updating jobs), but kernel jobs are always considered good.
	 */
328 	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
329 		atomic_inc(&bad->karma);
330 		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
331 		     i++) {
332 			struct drm_sched_rq *rq = &sched->sched_rq[i];
333 
334 			spin_lock(&rq->lock);
335 			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
336 				if (bad->s_fence->scheduled.context ==
337 				    entity->fence_context) {
338 					if (atomic_read(&bad->karma) >
339 					    bad->sched->hang_limit)
340 						if (entity->guilty)
341 							atomic_set(entity->guilty, 1);
342 					break;
343 				}
344 			}
345 			spin_unlock(&rq->lock);
346 			if (&entity->list != &rq->entities)
347 				break;
348 		}
349 	}
350 }
351 EXPORT_SYMBOL(drm_sched_increase_karma);
352 
353 /**
354  * drm_sched_stop - stop the scheduler
355  *
356  * @sched: scheduler instance
357  * @bad: job which caused the time out
358  *
 * Stop the scheduler and also remove and free all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it is the
 * caller's responsibility to release it manually if it is no longer part of
 * the mirror list.
363  *
364  */
365 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
366 {
367 	struct drm_sched_job *s_job, *tmp;
368 	unsigned long flags;
369 
370 	kthread_park(sched->thread);
371 
372 	/*
373 	 * Iterate the job list from later to  earlier one and either deactive
374 	 * their HW callbacks or remove them from mirror list if they already
375 	 * signaled.
376 	 * This iteration is thread safe as sched thread is stopped.
377 	 */
378 	list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
379 		if (s_job->s_fence->parent &&
380 		    dma_fence_remove_callback(s_job->s_fence->parent,
381 					      &s_job->cb)) {
382 			atomic_dec(&sched->hw_rq_count);
383 		} else {
384 			/*
385 			 * remove job from ring_mirror_list.
386 			 * Locking here is for concurrent resume timeout
387 			 */
388 			spin_lock_irqsave(&sched->job_list_lock, flags);
389 			list_del_init(&s_job->node);
390 			spin_unlock_irqrestore(&sched->job_list_lock, flags);
391 
392 			/*
393 			 * Wait for job's HW fence callback to finish using s_job
394 			 * before releasing it.
395 			 *
396 			 * Job is still alive so fence refcount at least 1
397 			 */
398 			dma_fence_wait(&s_job->s_fence->finished, false);
399 
400 			/*
401 			 * We must keep bad job alive for later use during
402 			 * recovery by some of the drivers but leave a hint
403 			 * that the guilty job must be released.
404 			 */
405 			if (bad != s_job)
406 				sched->ops->free_job(s_job);
407 			else
408 				sched->free_guilty = true;
409 		}
410 	}
411 
412 	/*
413 	 * Stop pending timer in flight as we rearm it in  drm_sched_start. This
414 	 * avoids the pending timeout work in progress to fire right away after
415 	 * this TDR finished and before the newly restarted jobs had a
416 	 * chance to complete.
417 	 */
418 	cancel_delayed_work(&sched->work_tdr);
419 }
420 
421 EXPORT_SYMBOL(drm_sched_stop);
422 
423 /**
 * drm_sched_start - recover jobs after a reset
425  *
426  * @sched: scheduler instance
427  * @full_recovery: proceed with complete sched restart
428  *
429  */
430 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
431 {
432 	struct drm_sched_job *s_job, *tmp;
433 	unsigned long flags;
434 	int r;
435 
436 	/*
437 	 * Locking the list is not required here as the sched thread is parked
438 	 * so no new jobs are being inserted or removed. Also concurrent
439 	 * GPU recovers can't run in parallel.
440 	 */
441 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
442 		struct dma_fence *fence = s_job->s_fence->parent;
443 
444 		atomic_inc(&sched->hw_rq_count);
445 
446 		if (!full_recovery)
447 			continue;
448 
449 		if (fence) {
450 			r = dma_fence_add_callback(fence, &s_job->cb,
451 						   drm_sched_process_job);
452 			if (r == -ENOENT)
453 				drm_sched_process_job(fence, &s_job->cb);
454 			else if (r)
455 				DRM_ERROR("fence add callback failed (%d)\n",
456 					  r);
457 		} else
458 			drm_sched_process_job(NULL, &s_job->cb);
459 	}
460 
461 	if (full_recovery) {
462 		spin_lock_irqsave(&sched->job_list_lock, flags);
463 		drm_sched_start_timeout(sched);
464 		spin_unlock_irqrestore(&sched->job_list_lock, flags);
465 	}
466 
467 	kthread_unpark(sched->thread);
468 }
469 EXPORT_SYMBOL(drm_sched_start);
470 
471 /**
 * drm_sched_resubmit_jobs - helper to relaunch jobs from the ring mirror list
473  *
474  * @sched: scheduler instance
475  *
476  */
477 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
478 {
479 	struct drm_sched_job *s_job, *tmp;
480 	uint64_t guilty_context;
481 	bool found_guilty = false;
482 
483 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
484 		struct drm_sched_fence *s_fence = s_job->s_fence;
485 
486 		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
487 			found_guilty = true;
488 			guilty_context = s_job->s_fence->scheduled.context;
489 		}
490 
491 		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
492 			dma_fence_set_error(&s_fence->finished, -ECANCELED);
493 
494 		dma_fence_put(s_job->s_fence->parent);
495 		s_job->s_fence->parent = sched->ops->run_job(s_job);
496 	}
497 }
498 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
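
/*
 * A hedged sketch of the recovery sequence a driver's ->timedout_job()
 * callback is typically expected to build from the helpers above. The actual
 * hardware reset (my_hw_reset() below) is a driver-specific placeholder:
 *
 *	static void my_timedout_job(struct drm_sched_job *bad)
 *	{
 *		struct drm_gpu_scheduler *sched = bad->sched;
 *
 *		drm_sched_stop(sched, bad);
 *		drm_sched_increase_karma(bad);
 *
 *		my_hw_reset();
 *
 *		drm_sched_resubmit_jobs(sched);
 *		drm_sched_start(sched, true);
 *	}
 */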
499 
500 /**
501  * drm_sched_job_init - init a scheduler job
502  *
503  * @job: scheduler job to init
504  * @entity: scheduler entity to use
505  * @owner: job owner for debugging
506  *
507  * Refer to drm_sched_entity_push_job() documentation
508  * for locking considerations.
509  *
510  * Returns 0 for success, negative error code otherwise.
511  */
512 int drm_sched_job_init(struct drm_sched_job *job,
513 		       struct drm_sched_entity *entity,
514 		       void *owner)
515 {
516 	struct drm_gpu_scheduler *sched;
517 
518 	drm_sched_entity_select_rq(entity);
519 	if (!entity->rq)
520 		return -ENOENT;
521 
522 	sched = entity->rq->sched;
523 
524 	job->sched = sched;
525 	job->entity = entity;
526 	job->s_priority = entity->rq - sched->sched_rq;
527 	job->s_fence = drm_sched_fence_create(entity, owner);
528 	if (!job->s_fence)
529 		return -ENOMEM;
530 	job->id = atomic64_inc_return(&sched->job_id_count);
531 
532 	INIT_LIST_HEAD(&job->node);
533 
534 	return 0;
535 }
536 EXPORT_SYMBOL(drm_sched_job_init);
537 
538 /**
539  * drm_sched_job_cleanup - clean up scheduler job resources
540  *
541  * @job: scheduler job to clean up
542  */
543 void drm_sched_job_cleanup(struct drm_sched_job *job)
544 {
545 	dma_fence_put(&job->s_fence->finished);
546 	job->s_fence = NULL;
547 }
548 EXPORT_SYMBOL(drm_sched_job_cleanup);
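
/*
 * A small sketch of the init/cleanup pairing, assuming a driver-specific job
 * struct which embeds struct drm_sched_job as "base" and a placeholder
 * my_map_buffers() step: if something fails after drm_sched_job_init() but
 * before the job was pushed to an entity, drm_sched_job_cleanup() releases
 * the scheduler fence again:
 *
 *	r = drm_sched_job_init(&job->base, entity, owner);
 *	if (r)
 *		return r;
 *
 *	r = my_map_buffers(job);
 *	if (r) {
 *		drm_sched_job_cleanup(&job->base);
 *		return r;
 *	}
 *
 *	drm_sched_entity_push_job(&job->base, entity);
 */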
549 
550 /**
551  * drm_sched_ready - is the scheduler ready
552  *
553  * @sched: scheduler instance
554  *
555  * Return true if we can push more jobs to the hw, otherwise false.
556  */
557 static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
558 {
559 	return atomic_read(&sched->hw_rq_count) <
560 		sched->hw_submission_limit;
561 }
562 
563 /**
564  * drm_sched_wakeup - Wake up the scheduler when it is ready
565  *
566  * @sched: scheduler instance
567  *
568  */
569 void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
570 {
571 	if (drm_sched_ready(sched))
572 		wake_up_interruptible(&sched->wake_up_worker);
573 }
574 
575 /**
576  * drm_sched_select_entity - Select next entity to process
577  *
578  * @sched: scheduler instance
579  *
580  * Returns the entity to process or NULL if none are found.
581  */
582 static struct drm_sched_entity *
583 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
584 {
585 	struct drm_sched_entity *entity;
586 	int i;
587 
588 	if (!drm_sched_ready(sched))
589 		return NULL;
590 
	/* Kernel run queue has higher priority than normal run queue */
592 	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
593 		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
594 		if (entity)
595 			break;
596 	}
597 
598 	return entity;
599 }
600 
601 /**
602  * drm_sched_process_job - process a job
603  *
604  * @f: fence
 * @cb: fence callback
 *
 * Called after the job has finished execution.
608  */
609 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
610 {
611 	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
612 	struct drm_sched_fence *s_fence = s_job->s_fence;
613 	struct drm_gpu_scheduler *sched = s_fence->sched;
614 
615 	atomic_dec(&sched->hw_rq_count);
616 	atomic_dec(&sched->num_jobs);
617 
618 	trace_drm_sched_process_job(s_fence);
619 
620 	drm_sched_fence_finished(s_fence);
621 	wake_up_interruptible(&sched->wake_up_worker);
622 }
623 
624 /**
625  * drm_sched_cleanup_jobs - destroy finished jobs
626  *
627  * @sched: scheduler instance
628  *
629  * Remove all finished jobs from the mirror list and destroy them.
630  */
631 static void drm_sched_cleanup_jobs(struct drm_gpu_scheduler *sched)
632 {
633 	unsigned long flags;
634 
635 	/* Don't destroy jobs while the timeout worker is running */
636 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
637 	    !cancel_delayed_work(&sched->work_tdr))
		return;

	while (!list_empty(&sched->ring_mirror_list)) {
642 		struct drm_sched_job *job;
643 
644 		job = list_first_entry(&sched->ring_mirror_list,
645 				       struct drm_sched_job, node);
646 		if (!dma_fence_is_signaled(&job->s_fence->finished))
647 			break;
648 
649 		spin_lock_irqsave(&sched->job_list_lock, flags);
650 		/* remove job from ring_mirror_list */
651 		list_del_init(&job->node);
652 		spin_unlock_irqrestore(&sched->job_list_lock, flags);
653 
654 		sched->ops->free_job(job);
655 	}
656 
657 	/* queue timeout for next job */
658 	spin_lock_irqsave(&sched->job_list_lock, flags);
659 	drm_sched_start_timeout(sched);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
663 
664 /**
665  * drm_sched_blocked - check if the scheduler is blocked
666  *
667  * @sched: scheduler instance
668  *
669  * Returns true if blocked, otherwise false.
670  */
671 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
672 {
673 	if (kthread_should_park()) {
674 		kthread_parkme();
675 		return true;
676 	}
677 
678 	return false;
679 }
680 
681 /**
682  * drm_sched_main - main scheduler thread
683  *
684  * @param: scheduler instance
685  *
686  * Returns 0.
687  */
688 static int drm_sched_main(void *param)
689 {
690 	struct sched_param sparam = {.sched_priority = 1};
691 	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
692 	int r;
693 
694 	sched_setscheduler(current, SCHED_FIFO, &sparam);
695 
696 	while (!kthread_should_stop()) {
697 		struct drm_sched_entity *entity = NULL;
698 		struct drm_sched_fence *s_fence;
699 		struct drm_sched_job *sched_job;
700 		struct dma_fence *fence;
701 
702 		wait_event_interruptible(sched->wake_up_worker,
703 					 (drm_sched_cleanup_jobs(sched),
704 					 (!drm_sched_blocked(sched) &&
705 					  (entity = drm_sched_select_entity(sched))) ||
706 					 kthread_should_stop()));
707 
708 		if (!entity)
709 			continue;
710 
711 		sched_job = drm_sched_entity_pop_job(entity);
712 		if (!sched_job)
713 			continue;
714 
715 		s_fence = sched_job->s_fence;
716 
717 		atomic_inc(&sched->hw_rq_count);
718 		drm_sched_job_begin(sched_job);
719 
720 		fence = sched->ops->run_job(sched_job);
721 		drm_sched_fence_scheduled(s_fence);
722 
723 		if (fence) {
724 			s_fence->parent = dma_fence_get(fence);
725 			r = dma_fence_add_callback(fence, &sched_job->cb,
726 						   drm_sched_process_job);
727 			if (r == -ENOENT)
728 				drm_sched_process_job(fence, &sched_job->cb);
729 			else if (r)
730 				DRM_ERROR("fence add callback failed (%d)\n",
731 					  r);
732 			dma_fence_put(fence);
733 		} else
734 			drm_sched_process_job(NULL, &sched_job->cb);
735 
736 		wake_up(&sched->job_scheduled);
737 	}
738 	return 0;
739 }
740 
741 /**
742  * drm_sched_init - Init a gpu scheduler instance
743  *
744  * @sched: scheduler instance
745  * @ops: backend operations for this scheduler
746  * @hw_submission: number of hw submissions that can be in flight
747  * @hang_limit: number of times to allow a job to hang before dropping it
748  * @timeout: timeout value in jiffies for the scheduler
749  * @name: name used for debugging
750  *
 * Returns 0 on success, otherwise an error code.
752  */
753 int drm_sched_init(struct drm_gpu_scheduler *sched,
754 		   const struct drm_sched_backend_ops *ops,
755 		   unsigned hw_submission,
756 		   unsigned hang_limit,
757 		   long timeout,
758 		   const char *name)
759 {
	int i, ret;

	sched->ops = ops;
762 	sched->hw_submission_limit = hw_submission;
763 	sched->name = name;
764 	sched->timeout = timeout;
765 	sched->hang_limit = hang_limit;
766 	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
767 		drm_sched_rq_init(sched, &sched->sched_rq[i]);
768 
769 	init_waitqueue_head(&sched->wake_up_worker);
770 	init_waitqueue_head(&sched->job_scheduled);
771 	INIT_LIST_HEAD(&sched->ring_mirror_list);
772 	spin_lock_init(&sched->job_list_lock);
773 	atomic_set(&sched->hw_rq_count, 0);
774 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
775 	atomic_set(&sched->num_jobs, 0);
776 	atomic64_set(&sched->job_id_count, 0);
777 
	/* Each scheduler will run on a separate kernel thread */
779 	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
780 	if (IS_ERR(sched->thread)) {
781 		ret = PTR_ERR(sched->thread);
782 		sched->thread = NULL;
783 		DRM_ERROR("Failed to create scheduler for %s.\n", name);
784 		return ret;
785 	}
786 
787 	sched->ready = true;
788 	return 0;
789 }
790 EXPORT_SYMBOL(drm_sched_init);
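
/*
 * A brief sketch of scheduler creation at driver load time. The callback
 * implementations and the ring structure are placeholders for whatever the
 * driver provides; only the hooks actually used by this file (run_job,
 * timedout_job, free_job) are shown:
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job = my_run_job,
 *		.timedout_job = my_timedout_job,
 *		.free_job = my_free_job,
 *	};
 *
 *	r = drm_sched_init(&ring->sched, &my_sched_ops,
 *			   num_hw_submissions, hang_limit,
 *			   msecs_to_jiffies(timeout_ms), ring->name);
 *	if (r)
 *		return r;
 */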
791 
792 /**
793  * drm_sched_fini - Destroy a gpu scheduler
794  *
795  * @sched: scheduler instance
796  *
797  * Tears down and cleans up the scheduler.
798  */
799 void drm_sched_fini(struct drm_gpu_scheduler *sched)
800 {
801 	if (sched->thread)
802 		kthread_stop(sched->thread);
803 
804 	sched->ready = false;
805 }
806 EXPORT_SYMBOL(drm_sched_fini);
807