/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects
 * entities from the run queues using either a round-robin or a FIFO policy.
 * The scheduler provides dependency handling features among jobs. The driver
 * is supposed to provide callback functions for backend operations to the
 * scheduler, like submitting a job to the hardware run queue, returning the
 * dependencies of a job, etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed. A typical driver wiring is sketched below.
 */
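
/**
 * DOC: Typical driver wiring
 *
 * A minimal, illustrative sketch of how a driver typically wires up the
 * scheduler (the my_* names are placeholders, not part of this API)::
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,
 *	};
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &my_ring->sched };
 *
 *	r = drm_sched_init(&my_ring->sched, &my_sched_ops, 16, 2,
 *			   msecs_to_jiffies(500), NULL, NULL, "my-ring", dev);
 *	if (!r)
 *		r = drm_sched_entity_init(&my_ctx->entity,
 *					  DRM_SCHED_PRIORITY_NORMAL,
 *					  sched_list, 1, NULL);
 *
 * Jobs are then created with drm_sched_job_init(), armed with
 * drm_sched_job_arm() and handed to the scheduler with
 * drm_sched_entity_push_job().
 */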

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/dma-resv.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_print.h>
#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

int drm_sched_policy = DRM_SCHED_POLICY_RR;

/**
 * DOC: sched_policy (int)
 * Used to override the default scheduling policy for entities in a run queue.
 */
MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin (default), " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO.");
module_param_named(sched_policy, drm_sched_policy, int, 0444);

static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
							    const struct rb_node *b)
{
	struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);
	struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);

	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
}

static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = entity->rq;

	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
		RB_CLEAR_NODE(&entity->rb_tree_node);
	}
}

void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
{
	/*
	 * Both locks need to be grabbed: one to protect against a concurrent
	 * entity->rq change from within drm_sched_entity_select_rq(), the
	 * other to protect updates to the rb tree structure.
	 */
	spin_lock(&entity->rq_lock);
	spin_lock(&entity->rq->lock);

	drm_sched_rq_remove_fifo_locked(entity);

	entity->oldest_job_waiting = ts;

	rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
		      drm_sched_entity_compare_before);

	spin_unlock(&entity->rq->lock);
	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance to associate with this run queue
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->rb_tree_root = RB_ROOT_CACHED;
	rq->current_entity = NULL;
	rq->sched = sched;
}

/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_inc(rq->sched->score);
	list_add_tail(&entity->list, &rq->entities);

	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_dec(rq->sched->score);
	list_del_init(&entity->list);

	if (rq->current_entity == entity)
		rq->current_entity = NULL;

	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
		drm_sched_rq_remove_fifo_locked(entity);

	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				reinit_completion(&entity->entity_idle);
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Find oldest waiting ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
{
	struct rb_node *rb;

	spin_lock(&rq->lock);
	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
		struct drm_sched_entity *entity;

		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			break;
		}
	}
	spin_unlock(&rq->lock);

	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
}

/**
 * drm_sched_job_done - complete a job
 * @s_job: pointer to the job which is done
 *
 * Finish the job's fence and wake up the worker thread.
 */
static void drm_sched_job_done(struct drm_sched_job *s_job)
{
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	atomic_dec(sched->score);

	trace_drm_sched_process_job(s_fence);

	dma_fence_get(&s_fence->finished);
	drm_sched_fence_finished(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_job_done_cb - the callback for a done job
 * @f: fence
 * @cb: fence callbacks
 */
static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);

	drm_sched_job_done(s_job);
}

/**
 * drm_sched_dependency_optimized - test if the dependency can be optimized
 *
 * @fence: the dependency fence
 * @entity: the entity which depends on the above fence
 *
 * Returns true if the dependency can be optimized and false otherwise
 */
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}
EXPORT_SYMBOL(drm_sched_dependency_optimized);

/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->pending_list))
		queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
}

/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);
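
/*
 * As an illustrative example (not taken from a specific driver), a hardware
 * fault interrupt handler could kick off timeout handling right away instead
 * of waiting for the timer to expire::
 *
 *	static irqreturn_t my_fault_irq(int irq, void *data)
 *	{
 *		struct my_device *mdev = data;
 *
 *		drm_sched_fault(&mdev->sched);
 *		return IRQ_HANDLED;
 *	}
 */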

/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrary large value,
 * MAX_SCHEDULE_TIMEOUT in this case.
 *
 * Returns the timeout remaining
 *
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

	sched_timeout = sched->work_tdr.timer.expires;

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive.
	 */
	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);

/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
		unsigned long remaining)
{
	spin_lock(&sched->job_list_lock);

	if (list_empty(&sched->pending_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);

	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
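
/*
 * drm_sched_suspend_timeout() and drm_sched_resume_timeout() are intended to
 * be used as a pair. A hedged sketch, assuming a driver-side operation that
 * must not be misdetected as a job hang (my_* names are placeholders)::
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(&mdev->sched);
 *	my_slow_firmware_operation(mdev);
 *	drm_sched_resume_timeout(&mdev->sched, remaining);
 */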

static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->list, &sched->pending_list);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;
	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job) {
		/*
		 * Remove the bad job so it cannot be freed by a concurrent
		 * drm_sched_get_cleanup_job. It will be reinserted after
		 * sched->thread is parked, at which point it is safe.
		 */
		list_del_init(&job->list);
		spin_unlock(&sched->job_list_lock);

		status = job->sched->ops->timedout_job(job);

		/*
		 * The guilty job did complete and hence needs to be manually
		 * removed. See the drm_sched_stop documentation.
		 */
		if (sched->free_guilty) {
			job->sched->ops->free_job(job);
			sched->free_guilty = false;
		}
	} else {
		spin_unlock(&sched->job_list_lock);
	}

	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}
}

/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty and
 * jobs from it will not be scheduled further.
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	drm_sched_increase_karma_ext(bad, 1);
}
EXPORT_SYMBOL(drm_sched_increase_karma);

void drm_sched_reset_karma(struct drm_sched_job *bad)
{
	drm_sched_increase_karma_ext(bad, 0);
}
EXPORT_SYMBOL(drm_sched_reset_karma);

/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: job which caused the time out
 *
 * Stop the scheduler, and in addition remove and free all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it is the
 * caller's responsibility to release it manually if it is not part of the
 * pending list any more.
 *
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;

	kthread_park(sched->thread);

	/*
	 * Reinsert back the bad job here - now it's safe as
	 * drm_sched_get_cleanup_job cannot race against us and release the
	 * bad job at this point - we parked (waited for) any in progress
	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
	 * now until the scheduler thread is unparked.
	 */
	if (bad && bad->sched == sched)
		/*
		 * Add at the head of the queue to reflect it was the earliest
		 * job extracted.
		 */
		list_add(&bad->list, &sched->pending_list);

	/*
	 * Iterate the job list from later to earlier one and either deactivate
	 * their HW callbacks or remove them from the pending list if they have
	 * already signaled.
	 * This iteration is thread safe as the sched thread is stopped.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
					 list) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		} else {
			/*
			 * Remove the job from the pending_list.
			 * Locking here is for concurrent resume timeout.
			 */
			spin_lock(&sched->job_list_lock);
			list_del_init(&s_job->list);
			spin_unlock(&sched->job_list_lock);

			/*
			 * Wait for the job's HW fence callback to finish using
			 * s_job before releasing it.
			 *
			 * The job is still alive, so the fence refcount is at
			 * least 1.
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep the bad job alive for later use during
			 * recovery by some of the drivers but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop the pending timer in flight as we rearm it in drm_sched_start.
	 * This avoids the pending timeout work in progress firing right away
	 * after this TDR finished and before the newly restarted jobs had a
	 * chance to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}
EXPORT_SYMBOL(drm_sched_stop);

/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 *
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	int r;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being inserted or removed. Also concurrent
	 * GPU recovers can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct dma_fence *fence = s_job->s_fence->parent;

		atomic_inc(&sched->hw_rq_count);

		if (!full_recovery)
			continue;

		if (fence) {
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(s_job);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					      r);
		} else
			drm_sched_job_done(s_job);
	}

	if (full_recovery) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}

	kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);

/**
 * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list
 *
 * @sched: scheduler instance
 *
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	drm_sched_resubmit_jobs_ext(sched, INT_MAX);
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);

/**
 * drm_sched_resubmit_jobs_ext - helper to relaunch a certain number of jobs from the pending list
 *
 * @sched: scheduler instance
 * @max: maximum number of jobs to relaunch
 *
 */
void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;
	struct dma_fence *fence;
	int i = 0;

	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (i >= max)
			break;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		fence = sched->ops->run_job(s_job);
		i++;

		if (IS_ERR_OR_NULL(fence)) {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			s_job->s_fence->parent = NULL;
		} else {
			s_job->s_fence->parent = dma_fence_get(fence);

			/* Drop for the original kref_init */
			dma_fence_put(fence);
		}
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);
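
/*
 * The stop/karma/resubmit/start helpers above are typically combined in a
 * driver's &drm_sched_backend_ops.timedout_job callback. A simplified sketch
 * of the common recovery sequence (my_* names and the reset step are
 * illustrative only)::
 *
 *	static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *job)
 *	{
 *		struct drm_gpu_scheduler *sched = job->sched;
 *
 *		drm_sched_stop(sched, job);
 *		drm_sched_increase_karma(job);
 *
 *		my_reset_hardware(sched);
 *
 *		drm_sched_resubmit_jobs(sched);
 *		drm_sched_start(sched, true);
 *
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 */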

/**
 * drm_sched_job_init - init a scheduler job
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Drivers must make sure to call drm_sched_job_cleanup() if this function
 * returns successfully, even when @job is aborted before drm_sched_job_arm()
 * is called.
 *
 * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
 * has died, which can mean that there's no valid runqueue for @entity.
 * This function returns -ENOENT in this case (which probably should be -EIO as
 * a more meaningful return value).
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	if (!entity->rq)
		return -ENOENT;

	job->entity = entity;
	job->s_fence = drm_sched_fence_alloc(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;

	INIT_LIST_HEAD(&job->list);

	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);

/**
 * drm_sched_job_arm - arm a scheduler job for execution
 * @job: scheduler job to arm
 *
 * This arms a scheduler job for execution. Specifically it initializes the
 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
 * or other places that need to track the completion of this job.
 *
 * Refer to drm_sched_entity_push_job() documentation for locking
 * considerations.
 *
 * This can only be called if drm_sched_job_init() succeeded.
 */
void drm_sched_job_arm(struct drm_sched_job *job)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_entity *entity = job->entity;

	BUG_ON(!entity);
	drm_sched_entity_select_rq(entity);
	sched = entity->rq->sched;

	job->sched = sched;
	job->s_priority = entity->rq - sched->sched_rq;
	job->id = atomic64_inc_return(&sched->job_id_count);

	drm_sched_fence_init(job->s_fence, job->entity);
}
EXPORT_SYMBOL(drm_sched_job_arm);

/**
 * drm_sched_job_add_dependency - adds the fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Note that @fence is consumed in both the success and error cases.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(&job->dependencies, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_sched_job_add_dependency);

/**
 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
 *   dependencies
 * @job: scheduler job to add the dependencies to
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	dma_resv_assert_held(obj->resv);

	dma_resv_for_each_fence(&cursor, obj->resv, dma_resv_usage_rw(write),
				fence) {
		/* Make sure to grab an additional ref on the added fence */
		dma_fence_get(fence);
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			dma_fence_put(fence);
			return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);

/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 * @job: scheduler job to clean up
 *
 * Cleans up the resources allocated with drm_sched_job_init().
 *
 * Drivers should call this from their error unwind code if @job is aborted
 * before drm_sched_job_arm() is called.
 *
 * After that point of no return @job is committed to be executed by the
 * scheduler, and this function should be called from the
 * &drm_sched_backend_ops.free_job callback.
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	struct dma_fence *fence;
	unsigned long index;

	if (kref_read(&job->s_fence->finished.refcount)) {
		/* drm_sched_job_arm() has been called */
		dma_fence_put(&job->s_fence->finished);
	} else {
		/* aborted job before committing to run it */
		drm_sched_fence_free(job->s_fence);
	}

	job->s_fence = NULL;

	xa_for_each(&job->dependencies, index, fence) {
		dma_fence_put(fence);
	}
	xa_destroy(&job->dependencies);
}
EXPORT_SYMBOL(drm_sched_job_cleanup);
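
/*
 * Putting the job API together, a driver's submit path typically follows this
 * sequence (a sketch only; locking, error handling and the my_job/my_bo names
 * are illustrative)::
 *
 *	r = drm_sched_job_init(&my_job->base, entity, file_priv);
 *	if (r)
 *		return r;
 *
 *	r = drm_sched_job_add_implicit_dependencies(&my_job->base,
 *						    &my_bo->base, true);
 *	if (r)
 *		goto err_cleanup;
 *
 *	drm_sched_job_arm(&my_job->base);
 *	drm_sched_entity_push_job(&my_job->base);
 *
 * Once pushed, the job is owned by the scheduler and is eventually released
 * via &drm_sched_backend_ops.free_job, which must call drm_sched_job_cleanup().
 * If submission is aborted before drm_sched_job_arm(), the error path (here
 * err_cleanup) must call drm_sched_job_cleanup() as well.
 */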

/**
 * drm_sched_ready - is the scheduler ready
 *
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * drm_sched_wakeup - Wake up the scheduler when it is ready
 *
 * @sched: scheduler instance
 *
 */
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
			drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
			drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

/**
 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
 *
 * @sched: scheduler instance
 *
 * Returns the next finished job from the pending list (if there is one)
 * ready to be destroyed.
 */
static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job, *next;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job && (!job->s_fence->parent ||
		    dma_fence_is_signaled(job->s_fence->parent))) {
		/* remove job from pending_list */
		list_del_init(&job->list);

		/* cancel this job's TO timer */
		cancel_delayed_work(&sched->work_tdr);
		/* make the scheduled timestamp more accurate */
		next = list_first_entry_or_null(&sched->pending_list,
						typeof(*next), list);

		if (next && job->s_fence->parent) {
			next->s_fence->scheduled.timestamp =
				job->s_fence->parent->timestamp;
			/* start TO timer for next job */
			drm_sched_start_timeout(sched);
		}
	} else {
		job = NULL;
	}

	spin_unlock(&sched->job_list_lock);

	return job;
}

/**
 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
 * @sched_list: list of drm_gpu_schedulers
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
 *
 * Returns a pointer to the sched with the least load, or NULL if none of the
 * drm_gpu_schedulers are ready.
 */
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		     unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
	int i;
	unsigned int min_score = UINT_MAX, num_score;

	for (i = 0; i < num_sched_list; ++i) {
		sched = sched_list[i];

		if (!sched->ready) {
			DRM_WARN("scheduler %s is not ready, skipping",
				 sched->name);
			continue;
		}

		num_score = atomic_read(sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			picked_sched = sched;
		}
	}

	return picked_sched;
}
EXPORT_SYMBOL(drm_sched_pick_best);

/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_set_fifo_low(current);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;
		struct drm_sched_job *cleanup_job = NULL;

		wait_event_interruptible(sched->wake_up_worker,
					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (cleanup_job)
			sched->ops->free_job(cleanup_job);

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);

		if (!sched_job) {
			complete(&entity->entity_idle);
			continue;
		}

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		trace_drm_run_job(sched_job, entity);
		fence = sched->ops->run_job(sched_job);
		complete(&entity->entity_idle);
		drm_sched_fence_scheduled(s_fence);

		if (!IS_ERR_OR_NULL(fence)) {
			s_fence->parent = dma_fence_get(fence);
			/* Drop for original kref_init of the fence */
			dma_fence_put(fence);

			r = dma_fence_add_callback(fence, &sched_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(sched_job);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					      r);
		} else {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			drm_sched_job_done(sched_job);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
 *		used
 * @score: optional score atomic shared with other schedulers
 * @name: name used for debugging
 * @dev: target &struct device
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev)
{
	int i, ret;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->timeout_wq = timeout_wq ? : system_wq;
	sched->hang_limit = hang_limit;
	sched->score = score ? score : &sched->_score;
	sched->dev = dev;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->pending_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->_score, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
		return ret;
	}

	sched->ready = true;
	return 0;
}
EXPORT_SYMBOL(drm_sched_init);

/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *s_entity;
	int i;

	if (sched->thread)
		kthread_stop(sched->thread);

	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		if (!rq)
			continue;

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list)
			/*
			 * Prevents reinsertion and marks job_queue as idle,
			 * it will be removed from the rq in
			 * drm_sched_entity_fini() eventually.
			 */
			s_entity->stopped = true;
		spin_unlock(&rq->lock);
	}

	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
	wake_up_all(&sched->job_scheduled);

	/* Confirm no work left behind accessing device structures */
	cancel_delayed_work_sync(&sched->work_tdr);

	sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);

/**
 * drm_sched_increase_karma_ext - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 * @type: type for increase/reset karma
 *
 */
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* don't change @bad's karma if it's from the KERNEL RQ,
	 * because a GPU hang can corrupt kernel jobs (like VM updating jobs),
	 * but keep in mind that kernel jobs are always considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		if (type == 0)
			atomic_set(&bad->karma, 0);
		else if (type == 1)
			atomic_inc(&bad->karma);

		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
		     i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (entity->guilty)
						atomic_set(entity->guilty, type);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma_ext);