/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects the
 * entities from the run queue using a FIFO. The scheduler provides dependency
 * handling features among jobs. The driver is supposed to provide callback
 * functions for backend operations to the scheduler, such as submitting a job
 * to the hardware run queue, returning the dependencies of a job, etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed.
 */
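
/*
 * A rough sketch of how a driver typically ties into this (illustrative
 * only; my_ops, my_job and my_file_priv are placeholders, and the helper
 * signatures may differ between kernel versions):
 *
 *	struct drm_gpu_scheduler sched;
 *	struct drm_sched_entity entity;
 *	struct drm_gpu_scheduler *sched_list[] = { &sched };
 *
 *	drm_sched_init(&sched, &my_ops, 4, 3, msecs_to_jiffies(1000),
 *		       NULL, "my_ring");
 *	drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
 *			      sched_list, 1, NULL);
 *
 *	drm_sched_job_init(&my_job->base, &entity, my_file_priv);
 *	drm_sched_entity_push_job(&my_job->base, &entity);
 *
 * Jobs pushed this way are picked up by the scheduler thread, handed to
 * &drm_sched_backend_ops.run_job and freed through
 * &drm_sched_backend_ops.free_job once their hardware fence has signaled.
 */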

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance to associate with this run queue
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
	rq->sched = sched;
}

/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	atomic_inc(rq->sched->score);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	atomic_dec(rq->sched->score);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_select_entity - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				reinit_completion(&entity->entity_idle);
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * drm_sched_job_done - complete a job
 * @s_job: pointer to the job which is done
 *
 * Finish the job's fence and wake up the worker thread.
 */
static void drm_sched_job_done(struct drm_sched_job *s_job)
{
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	atomic_dec(sched->score);

	trace_drm_sched_process_job(s_fence);

	dma_fence_get(&s_fence->finished);
	drm_sched_fence_finished(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_job_done_cb - the callback for a done job
 * @f: fence
 * @cb: fence callbacks
 */
static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);

	drm_sched_job_done(s_job);
}

/**
 * drm_sched_dependency_optimized - test if the dependency can be optimized
 *
 * @fence: the dependency fence
 * @entity: the entity which depends on the above fence
 *
 * Returns true if the dependency can be optimized and false otherwise.
 */
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}
EXPORT_SYMBOL(drm_sched_dependency_optimized);

/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->pending_list))
		schedule_delayed_work(&sched->work_tdr, sched->timeout);
}

/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	mod_delayed_work(system_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);

/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
 * MAX_SCHEDULE_TIMEOUT in this case.
 *
 * Returns the timeout remaining.
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

	sched_timeout = sched->work_tdr.timer.expires;

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive.
	 */
	if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);

/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
		unsigned long remaining)
{
	spin_lock(&sched->job_list_lock);

	if (list_empty(&sched->pending_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(system_wq, &sched->work_tdr, remaining);

	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
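
/*
 * A minimal sketch of how the suspend/resume pair above is intended to be
 * used, bracketing a stretch where the driver knows the hardware will not
 * make progress (my_stall_hardware() is a placeholder for driver code):
 *
 *	unsigned long remaining = drm_sched_suspend_timeout(&sched);
 *
 *	my_stall_hardware();
 *
 *	drm_sched_resume_timeout(&sched, remaining);
 */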

/**
 * drm_sched_job_begin - enqueue a job on the pending list
 *
 * @s_job: job to enqueue
 *
 * Add the job to the pending list and (re)start the timeout handler.
 */
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->list, &sched->pending_list);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job) {
		/*
		 * Remove the bad job so it cannot be freed by a concurrent
		 * drm_sched_get_cleanup_job. It will be reinserted after
		 * sched->thread is parked, at which point it's safe.
		 */
		list_del_init(&job->list);
		spin_unlock(&sched->job_list_lock);

		job->sched->ops->timedout_job(job);

		/*
		 * Guilty job did complete and hence needs to be manually removed.
		 * See drm_sched_stop() doc.
		 */
		if (sched->free_guilty) {
			job->sched->ops->free_job(job);
			sched->free_guilty = false;
		}
	} else {
		spin_unlock(&sched->job_list_lock);
	}

	spin_lock(&sched->job_list_lock);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty and
 * jobs from it will not be scheduled further.
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	drm_sched_increase_karma_ext(bad, 1);
}
EXPORT_SYMBOL(drm_sched_increase_karma);

/**
 * drm_sched_reset_karma - Reset a job's karma
 *
 * @bad: The job whose karma should be reset
 *
 * Reset the hang counter of @bad and clear the guilty flag of the entity
 * that submitted it.
 */
void drm_sched_reset_karma(struct drm_sched_job *bad)
{
	drm_sched_increase_karma_ext(bad, 0);
}
EXPORT_SYMBOL(drm_sched_reset_karma);

/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: job which caused the time out
 *
 * Stop the scheduler, and also remove and free all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it is the
 * caller's responsibility to release it manually if it's not part of the
 * pending list any more.
 *
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;

	kthread_park(sched->thread);

	/*
	 * Reinsert back the bad job here - now it's safe as
	 * drm_sched_get_cleanup_job cannot race against us and release the
	 * bad job at this point - we parked (waited for) any in progress
	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
	 * now until the scheduler thread is unparked.
	 */
	if (bad && bad->sched == sched)
		/*
		 * Add at the head of the queue to reflect it was the earliest
		 * job extracted.
		 */
		list_add(&bad->list, &sched->pending_list);

	/*
	 * Iterate the job list from later to earlier one and either deactivate
	 * their HW callbacks or remove them from the pending list if they have
	 * already signaled.
	 * This iteration is thread safe as the sched thread is stopped.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
					 list) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			atomic_dec(&sched->hw_rq_count);
		} else {
			/*
			 * Remove the job from the pending_list.
			 * Locking here is for a concurrent resume timeout.
			 */
			spin_lock(&sched->job_list_lock);
			list_del_init(&s_job->list);
			spin_unlock(&sched->job_list_lock);

			/*
			 * Wait for the job's HW fence callback to finish using
			 * s_job before releasing it.
			 *
			 * The job is still alive, so the fence refcount is at
			 * least 1.
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep the bad job alive for later use during
			 * recovery by some of the drivers, but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop the pending timer in flight as we rearm it in drm_sched_start.
	 * This avoids the pending timeout work in progress firing right away
	 * after this TDR has finished and before the newly restarted jobs had
	 * a chance to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}
EXPORT_SYMBOL(drm_sched_stop);

/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 *
 * Re-arm the HW fence callbacks of the pending jobs (when @full_recovery is
 * true) and unpark the scheduler thread.
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	int r;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being inserted or removed. Also concurrent
	 * GPU recovers can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct dma_fence *fence = s_job->s_fence->parent;

		atomic_inc(&sched->hw_rq_count);

		if (!full_recovery)
			continue;

		if (fence) {
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(s_job);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
		} else
			drm_sched_job_done(s_job);
	}

	if (full_recovery) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}

	kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);
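
/*
 * drm_sched_stop(), drm_sched_resubmit_jobs() and drm_sched_start() are
 * typically used together from a driver's &drm_sched_backend_ops.timedout_job
 * callback. A rough, illustrative sketch of that recovery sequence
 * (my_hw_reset() stands in for the driver's own reset code):
 *
 *	drm_sched_stop(sched, bad_job);
 *	drm_sched_increase_karma(bad_job);
 *	my_hw_reset();
 *	drm_sched_resubmit_jobs(sched);
 *	drm_sched_start(sched, true);
 */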

/**
 * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list
 *
 * @sched: scheduler instance
 *
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	drm_sched_resubmit_jobs_ext(sched, INT_MAX);
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);

/**
 * drm_sched_resubmit_jobs_ext - helper to relaunch a certain number of jobs from the pending list
 *
 * @sched: scheduler instance
 * @max: number of jobs to relaunch
 *
 */
void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;
	struct dma_fence *fence;
	int i = 0;

	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (i >= max)
			break;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		dma_fence_put(s_job->s_fence->parent);
		fence = sched->ops->run_job(s_job);
		i++;

		if (IS_ERR_OR_NULL(fence)) {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			s_job->s_fence->parent = NULL;
		} else {
			s_job->s_fence->parent = fence;
		}
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);

/**
 * drm_sched_job_init - init a scheduler job
 *
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	struct drm_gpu_scheduler *sched;

	drm_sched_entity_select_rq(entity);
	if (!entity->rq)
		return -ENOENT;

	sched = entity->rq->sched;

	job->sched = sched;
	job->entity = entity;
	job->s_priority = entity->rq - sched->sched_rq;
	job->s_fence = drm_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_LIST_HEAD(&job->list);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);
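
/*
 * A minimal, illustrative sketch of the per-job submission flow built on
 * drm_sched_job_init() (my_job is a placeholder for a driver job structure
 * that embeds a &struct drm_sched_job named "base"):
 *
 *	ret = drm_sched_job_init(&my_job->base, &entity, my_file_priv);
 *	if (ret)
 *		return ret;
 *
 *	drm_sched_entity_push_job(&my_job->base, &entity);
 *
 * The matching &drm_sched_backend_ops.free_job callback is expected to call
 * drm_sched_job_cleanup() once the job is retired.
 */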

/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 *
 * @job: scheduler job to clean up
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	dma_fence_put(&job->s_fence->finished);
	job->s_fence = NULL;
}
EXPORT_SYMBOL(drm_sched_job_cleanup);

/**
 * drm_sched_ready - is the scheduler ready
 *
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * drm_sched_wakeup - Wake up the scheduler when it is ready
 *
 * @sched: scheduler instance
 *
 */
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

/**
 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
 *
 * @sched: scheduler instance
 *
 * Returns the next finished job from the pending list (if there is one)
 * ready for it to be destroyed.
 */
static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job;

	/*
	 * Don't destroy jobs while the timeout worker is running, OR the
	 * thread is being parked and hence assumed to not touch pending_list.
	 */
	if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !cancel_delayed_work(&sched->work_tdr)) ||
	    kthread_should_park())
		return NULL;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
		/* remove job from pending_list */
		list_del_init(&job->list);
	} else {
		job = NULL;
		/* queue timeout for next job */
		drm_sched_start_timeout(sched);
	}

	spin_unlock(&sched->job_list_lock);

	return job;
}

/**
 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
 * @sched_list: list of drm_gpu_schedulers
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
 *
 * Returns a pointer to the sched with the least load, or NULL if none of the
 * drm_gpu_schedulers are ready.
 */
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		     unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
	int i;
	unsigned int min_score = UINT_MAX, num_score;

	for (i = 0; i < num_sched_list; ++i) {
		sched = sched_list[i];

		if (!sched->ready) {
			DRM_WARN("scheduler %s is not ready, skipping",
				 sched->name);
			continue;
		}

		num_score = atomic_read(sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			picked_sched = sched;
		}
	}

	return picked_sched;
}
EXPORT_SYMBOL(drm_sched_pick_best);

/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_set_fifo_low(current);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;
		struct drm_sched_job *cleanup_job = NULL;

		wait_event_interruptible(sched->wake_up_worker,
					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (cleanup_job) {
			sched->ops->free_job(cleanup_job);
			/* queue timeout for next job */
			drm_sched_start_timeout(sched);
		}

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);

		complete(&entity->entity_idle);

		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		trace_drm_run_job(sched_job, entity);
		fence = sched->ops->run_job(sched_job);
		drm_sched_fence_scheduled(s_fence);

		if (!IS_ERR_OR_NULL(fence)) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &sched_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(sched_job);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			drm_sched_job_done(sched_job);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @score: optional score atomic shared with other schedulers
 * @name: name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission, unsigned hang_limit, long timeout,
		   atomic_t *score, const char *name)
{
	int i, ret;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->hang_limit = hang_limit;
	sched->score = score ? score : &sched->_score;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->pending_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->_score, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return ret;
	}

	sched->ready = true;
	return 0;
}
EXPORT_SYMBOL(drm_sched_init);
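
/*
 * A rough sketch of the backend operations a driver passes in via @ops of
 * drm_sched_init(). The my_* callbacks are placeholders for driver code and
 * the exact member set depends on the kernel version, so treat this as
 * illustrative only:
 *
 *	static const struct drm_sched_backend_ops my_ops = {
 *		.dependency   = my_job_dependency,
 *		.run_job      = my_run_job,
 *		.timedout_job = my_timedout_job,
 *		.free_job     = my_free_job,
 *	};
 */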

/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);

	/* Confirm no work left behind accessing device structures */
	cancel_delayed_work_sync(&sched->work_tdr);

	sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);

/**
 * drm_sched_increase_karma_ext - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 * @type: 1 to increase @bad's karma, 0 to reset it
 *
 */
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* don't change @bad's karma if it's from the KERNEL RQ,
	 * because a GPU hang can sometimes corrupt kernel jobs (like VM
	 * updating jobs), but keep in mind that kernel jobs are always
	 * considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		if (type == 0)
			atomic_set(&bad->karma, 0);
		else if (type == 1)
			atomic_inc(&bad->karma);

		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
		     i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (entity->guilty)
						atomic_set(entity->guilty, type);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma_ext);