/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects
 * entities from the run queue using a FIFO by default. The scheduler provides
 * dependency handling features among jobs. The driver is supposed to provide
 * callback functions for backend operations to the scheduler, such as
 * submitting a job to the hardware run queue and returning the dependencies
 * of a job.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed.
 */
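
/*
 * As an illustrative sketch (not taken verbatim from any driver), a driver
 * typically wires into the scheduler roughly like this; the foo_* names,
 * dev/owner and the chosen limits are placeholders and error handling is
 * omitted:
 *
 *	struct drm_gpu_scheduler sched;
 *	struct drm_gpu_scheduler *sched_list = &sched;
 *	struct drm_sched_entity entity;
 *
 *	drm_sched_init(&sched, &foo_sched_ops, 64, 3,
 *		       msecs_to_jiffies(10000), NULL, NULL, "foo", dev);
 *	drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
 *			      &sched_list, 1, NULL);
 *
 *	drm_sched_job_init(&job->base, &entity, owner);
 *	drm_sched_job_arm(&job->base);
 *	drm_sched_entity_push_job(&job->base);
 */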

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/dma-resv.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_print.h>
#include <drm/drm_gem.h>
#include <drm/drm_syncobj.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

int drm_sched_policy = DRM_SCHED_POLICY_FIFO;

/**
 * DOC: sched_policy (int)
 * Used to override the default scheduling policy for entities in a run queue.
 */
MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
module_param_named(sched_policy, drm_sched_policy, int, 0444);
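
/*
 * As an illustrative example (assuming the scheduler is built as the
 * gpu-sched module), round robin can be selected on the kernel command
 * line with:
 *
 *	gpu_sched.sched_policy=0
 *
 * while the default value of 1 keeps FIFO selection.
 */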

static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
							    const struct rb_node *b)
{
	struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);
	struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);

	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
}

static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = entity->rq;

	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
		RB_CLEAR_NODE(&entity->rb_tree_node);
	}
}

void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
{
	/*
	 * Both locks need to be grabbed: one to protect against the entity's
	 * rq changing from within a concurrent drm_sched_entity_select_rq(),
	 * and the other to update the rb tree structure.
	 */
	spin_lock(&entity->rq_lock);
	spin_lock(&entity->rq->lock);

	drm_sched_rq_remove_fifo_locked(entity);

	entity->oldest_job_waiting = ts;

	rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
		      drm_sched_entity_compare_before);

	spin_unlock(&entity->rq->lock);
	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance to associate with this run queue
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->rb_tree_root = RB_ROOT_CACHED;
	rq->current_entity = NULL;
	rq->sched = sched;
}

/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_inc(rq->sched->score);
	list_add_tail(&entity->list, &rq->entities);

	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_dec(rq->sched->score);
	list_del_init(&entity->list);

	if (rq->current_entity == entity)
		rq->current_entity = NULL;

	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
		drm_sched_rq_remove_fifo_locked(entity);

	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				reinit_completion(&entity->entity_idle);
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {
		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Find oldest waiting ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
{
	struct rb_node *rb;

	spin_lock(&rq->lock);
	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
		struct drm_sched_entity *entity;

		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			break;
		}
	}
	spin_unlock(&rq->lock);

	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
}

/**
 * drm_sched_job_done - complete a job
 * @s_job: pointer to the job which is done
 *
 * Finish the job's fence and wake up the worker thread.
 */
static void drm_sched_job_done(struct drm_sched_job *s_job)
{
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	atomic_dec(sched->score);

	trace_drm_sched_process_job(s_fence);

	dma_fence_get(&s_fence->finished);
	drm_sched_fence_finished(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_job_done_cb - the callback for a done job
 * @f: fence
 * @cb: fence callbacks
 */
static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);

	drm_sched_job_done(s_job);
}

/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->pending_list))
		queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
}

/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);
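
/*
 * An illustrative sketch of a driver calling drm_sched_fault() from its
 * fault interrupt handler (the foo_* names are placeholders):
 *
 *	static irqreturn_t foo_fault_irq(int irq, void *arg)
 *	{
 *		struct foo_device *fdev = arg;
 *
 *		drm_sched_fault(&fdev->sched);
 *		return IRQ_HANDLED;
 *	}
 */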

/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrary large value,
 * MAX_SCHEDULE_TIMEOUT in this case.
 *
 * Returns the timeout remaining
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

	sched_timeout = sched->work_tdr.timer.expires;

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive.
	 */
	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);
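
/*
 * A minimal sketch of pairing drm_sched_suspend_timeout() with
 * drm_sched_resume_timeout(), e.g. around a driver operation that should
 * not count against the job timeout (foo_* names are placeholders):
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(sched);
 *	foo_do_slow_maintenance(fdev);
 *	drm_sched_resume_timeout(sched, remaining);
 */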

/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
		unsigned long remaining)
{
	spin_lock(&sched->job_list_lock);

	if (list_empty(&sched->pending_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);

	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);

static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->list, &sched->pending_list);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;
	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job) {
		/*
		 * Remove the bad job so it cannot be freed by a concurrent
		 * drm_sched_get_cleanup_job(). It will be reinserted after
		 * sched->thread is parked, at which point it's safe.
		 */
		list_del_init(&job->list);
		spin_unlock(&sched->job_list_lock);

		status = job->sched->ops->timedout_job(job);

		/*
		 * Guilty job did complete and hence needs to be manually
		 * removed. See the drm_sched_stop() doc.
		 */
		if (sched->free_guilty) {
			job->sched->ops->free_job(job);
			sched->free_guilty = false;
		}
	} else {
		spin_unlock(&sched->job_list_lock);
	}

	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}
}
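
/*
 * A driver's &drm_sched_backend_ops.timedout_job callback commonly follows
 * the stop/reset/start pattern sketched below (the foo_* names are
 * placeholders; drm_sched_resubmit_jobs() is deprecated but still used by
 * older drivers):
 *
 *	static enum drm_gpu_sched_stat foo_timedout_job(struct drm_sched_job *job)
 *	{
 *		struct foo_device *fdev = foo_job_to_dev(job);
 *
 *		drm_sched_stop(job->sched, job);
 *		foo_reset_hw(fdev);
 *		drm_sched_resubmit_jobs(job->sched);
 *		drm_sched_start(job->sched, true);
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 */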

/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: job which caused the time out
 *
 * Stop the scheduler, and also remove and free all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it's
 * the caller's responsibility to release it manually if it's no longer part
 * of the pending list.
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;

	kthread_park(sched->thread);

	/*
	 * Reinsert back the bad job here - now it's safe as
	 * drm_sched_get_cleanup_job cannot race against us and release the
	 * bad job at this point - we parked (waited for) any in progress
	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
	 * now until the scheduler thread is unparked.
	 */
	if (bad && bad->sched == sched)
		/*
		 * Add at the head of the queue to reflect it was the earliest
		 * job extracted.
		 */
		list_add(&bad->list, &sched->pending_list);

	/*
	 * Iterate the job list from later to earlier one and either deactivate
	 * their HW callbacks or remove them from the pending list if they have
	 * already signaled.
	 * This iteration is thread safe as the sched thread is stopped.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
					 list) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		} else {
			/*
			 * remove job from pending_list.
			 * Locking here is for concurrent resume timeout
			 */
			spin_lock(&sched->job_list_lock);
			list_del_init(&s_job->list);
			spin_unlock(&sched->job_list_lock);

			/*
			 * Wait for job's HW fence callback to finish using s_job
			 * before releasing it.
			 *
			 * Job is still alive so fence refcount at least 1
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep bad job alive for later use during
			 * recovery by some of the drivers but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop the pending timer in flight as we rearm it in drm_sched_start.
	 * This prevents the pending timeout work from firing right away after
	 * this TDR finishes and before the newly restarted jobs have had a
	 * chance to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}
EXPORT_SYMBOL(drm_sched_stop);

/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	int r;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being inserted or removed. Also concurrent
	 * GPU recovers can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct dma_fence *fence = s_job->s_fence->parent;

		atomic_inc(&sched->hw_rq_count);

		if (!full_recovery)
			continue;

		if (fence) {
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(s_job);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					      r);
		} else {
			drm_sched_job_done(s_job);
		}
	}

	if (full_recovery) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}

	kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);

/**
 * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
 *
 * @sched: scheduler instance
 *
 * Re-submitting jobs was a concept AMD came up with as a cheap way to
 * implement recovery after a job timeout.
 *
 * This turned out not to work very well. First of all there are many
 * problems with the dma_fence implementation and requirements. Either the
 * implementation is risking deadlocks with core memory management or violating
 * documented implementation details of the dma_fence object.
 *
 * Drivers can still save and restore their state for recovery operations, but
 * we shouldn't make this a general scheduler feature around the dma_fence
 * interface.
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;
	struct dma_fence *fence;

	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		fence = sched->ops->run_job(s_job);

		if (IS_ERR_OR_NULL(fence)) {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			s_job->s_fence->parent = NULL;
		} else {
			s_job->s_fence->parent = dma_fence_get(fence);

			/* Drop for original kref_init */
			dma_fence_put(fence);
		}
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);

/**
 * drm_sched_job_init - init a scheduler job
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Drivers must make sure to call drm_sched_job_cleanup() if this function
 * returns successfully, even when @job is aborted before drm_sched_job_arm()
 * is called.
 *
 * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
 * has died, which can mean that there's no valid runqueue for @entity.
 * This function returns -ENOENT in this case (which probably should be -EIO as
 * a more meaningful return value).
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	if (!entity->rq)
		return -ENOENT;

	job->entity = entity;
	job->s_fence = drm_sched_fence_alloc(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;

	INIT_LIST_HEAD(&job->list);

	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);
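
/*
 * Sketch of the expected job lifecycle, including the error unwind that
 * drm_sched_job_cleanup() covers before drm_sched_job_arm() is called
 * (foo_prepare_job() is a hypothetical driver step):
 *
 *	ret = drm_sched_job_init(&job->base, &entity, owner);
 *	if (ret)
 *		return ret;
 *
 *	ret = foo_prepare_job(job);
 *	if (ret) {
 *		drm_sched_job_cleanup(&job->base);
 *		return ret;
 *	}
 *
 *	drm_sched_job_arm(&job->base);
 *	drm_sched_entity_push_job(&job->base);
 */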

/**
 * drm_sched_job_arm - arm a scheduler job for execution
 * @job: scheduler job to arm
 *
 * This arms a scheduler job for execution. Specifically it initializes the
 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
 * or other places that need to track the completion of this job.
 *
 * Refer to drm_sched_entity_push_job() documentation for locking
 * considerations.
 *
 * This can only be called if drm_sched_job_init() succeeded.
 */
void drm_sched_job_arm(struct drm_sched_job *job)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_entity *entity = job->entity;

	BUG_ON(!entity);
	drm_sched_entity_select_rq(entity);
	sched = entity->rq->sched;

	job->sched = sched;
	job->s_priority = entity->rq - sched->sched_rq;
	job->id = atomic64_inc_return(&sched->job_id_count);

	drm_sched_fence_init(job->s_fence, job->entity);
}
EXPORT_SYMBOL(drm_sched_job_arm);

/**
 * drm_sched_job_add_dependency - adds the fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Note that @fence is consumed in both the success and error cases.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(&job->dependencies, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_sched_job_add_dependency);

/**
 * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @file: drm file private pointer
 * @handle: syncobj handle to lookup
 * @point: timeline point
 *
 * This adds the fence matching the given syncobj to @job.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
					 struct drm_file *file,
					 u32 handle,
					 u32 point)
{
	struct dma_fence *fence;
	int ret;

	ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
	if (ret)
		return ret;

	return drm_sched_job_add_dependency(job, fence);
}
EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);

/**
 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
 * @job: scheduler job to add the dependencies to
 * @resv: the dma_resv object to get the fences from
 * @usage: the dma_resv_usage to use to filter the fences
 *
 * This adds all fences matching the given usage from @resv to @job.
 * Must be called with the @resv lock held.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	dma_resv_assert_held(resv);

	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
		/* Make sure to grab an additional ref on the added fence */
		dma_fence_get(fence);
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			dma_fence_put(fence);
			return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);

/**
 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
 *   dependencies
 * @job: scheduler job to add the dependencies to
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write)
{
	return drm_sched_job_add_resv_dependencies(job, obj->resv,
						   dma_resv_usage_rw(write));
}
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
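
/*
 * Illustrative use while the reservations are held and before the driver
 * publishes its own fences (objs/nr_objs/ticket are assumed driver state):
 *
 *	drm_gem_lock_reservations(objs, nr_objs, &ticket);
 *	for (i = 0; i < nr_objs; ++i) {
 *		ret = drm_sched_job_add_implicit_dependencies(&job->base,
 *							      objs[i], write);
 *		if (ret)
 *			goto unlock;
 *	}
 */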

/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 * @job: scheduler job to clean up
 *
 * Cleans up the resources allocated with drm_sched_job_init().
 *
 * Drivers should call this from their error unwind code if @job is aborted
 * before drm_sched_job_arm() is called.
 *
 * After that point of no return @job is committed to be executed by the
 * scheduler, and this function should be called from the
 * &drm_sched_backend_ops.free_job callback.
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	struct dma_fence *fence;
	unsigned long index;

	if (kref_read(&job->s_fence->finished.refcount)) {
		/* drm_sched_job_arm() has been called */
		dma_fence_put(&job->s_fence->finished);
	} else {
		/* aborted job before committing to run it */
		drm_sched_fence_free(job->s_fence);
	}

	job->s_fence = NULL;

	xa_for_each(&job->dependencies, index, fence) {
		dma_fence_put(fence);
	}
	xa_destroy(&job->dependencies);
}
EXPORT_SYMBOL(drm_sched_job_cleanup);

/**
 * drm_sched_ready - is the scheduler ready
 *
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * drm_sched_wakeup - Wake up the scheduler when it is ready
 *
 * @sched: scheduler instance
 */
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
			drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
			drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

/**
 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
 *
 * @sched: scheduler instance
 *
 * Returns the next finished job from the pending list (if there is one)
 * ready for it to be destroyed.
 */
static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job, *next;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
		/* remove job from pending_list */
		list_del_init(&job->list);

		/* cancel this job's TO timer */
		cancel_delayed_work(&sched->work_tdr);
		/* make the scheduled timestamp more accurate */
		next = list_first_entry_or_null(&sched->pending_list,
						typeof(*next), list);

		if (next) {
			next->s_fence->scheduled.timestamp =
				job->s_fence->finished.timestamp;
			/* start TO timer for next job */
			drm_sched_start_timeout(sched);
		}
	} else {
		job = NULL;
	}

	spin_unlock(&sched->job_list_lock);

	if (job) {
		job->entity->elapsed_ns += ktime_to_ns(
			ktime_sub(job->s_fence->finished.timestamp,
				  job->s_fence->scheduled.timestamp));
	}

	return job;
}

/**
 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
 * @sched_list: list of drm_gpu_schedulers
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
 *
 * Returns pointer of the sched with the least load or NULL if none of the
 * drm_gpu_schedulers are ready
 */
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		     unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
	int i;
	unsigned int min_score = UINT_MAX, num_score;

	for (i = 0; i < num_sched_list; ++i) {
		sched = sched_list[i];

		if (!sched->ready) {
			DRM_WARN("scheduler %s is not ready, skipping",
				 sched->name);
			continue;
		}

		num_score = atomic_read(sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			picked_sched = sched;
		}
	}

	return picked_sched;
}
EXPORT_SYMBOL(drm_sched_pick_best);
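
/*
 * The entity code uses this for load balancing when an entity was created
 * with several schedulers; a driver-side sketch might look like this
 * (sched_list/num_sched_list are assumed driver state):
 *
 *	struct drm_gpu_scheduler *sched;
 *
 *	sched = drm_sched_pick_best(sched_list, num_sched_list);
 *	if (!sched)
 *		return -ENODEV;
 */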

/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_set_fifo_low(current);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;
		struct drm_sched_job *cleanup_job = NULL;

		wait_event_interruptible(sched->wake_up_worker,
					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (cleanup_job)
			sched->ops->free_job(cleanup_job);

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);

		if (!sched_job) {
			complete_all(&entity->entity_idle);
			continue;
		}

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		trace_drm_run_job(sched_job, entity);
		fence = sched->ops->run_job(sched_job);
		complete_all(&entity->entity_idle);
		drm_sched_fence_scheduled(s_fence);

		if (!IS_ERR_OR_NULL(fence)) {
			drm_sched_fence_set_parent(s_fence, fence);
			/* Drop for original kref_init of the fence */
			dma_fence_put(fence);

			r = dma_fence_add_callback(fence, &sched_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(sched_job);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					      r);
		} else {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			drm_sched_job_done(sched_job);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
 *		used
 * @score: optional score atomic shared with other schedulers
 * @name: name used for debugging
 * @dev: target &struct device
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev)
{
	int i, ret;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->timeout_wq = timeout_wq ? : system_wq;
	sched->hang_limit = hang_limit;
	sched->score = score ? score : &sched->_score;
	sched->dev = dev;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->pending_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->_score, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
		return ret;
	}

	sched->ready = true;
	return 0;
}
EXPORT_SYMBOL(drm_sched_init);
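
/*
 * Example invocation from a driver's ring setup (a sketch; the limits,
 * timeout and foo_*/ring names are made-up values):
 *
 *	ret = drm_sched_init(&ring->sched, &foo_sched_ops,
 *			     64, 3, msecs_to_jiffies(10000),
 *			     NULL, NULL, ring->name, fdev->dev);
 *	if (ret)
 *		return ret;
 */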

/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *s_entity;
	int i;

	if (sched->thread)
		kthread_stop(sched->thread);

	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		if (!rq)
			continue;

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list)
			/*
			 * Prevents reinsertion and marks job_queue as idle;
			 * it will be removed from the rq in
			 * drm_sched_entity_fini() eventually.
			 */
			s_entity->stopped = true;
		spin_unlock(&rq->lock);
	}

	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
	wake_up_all(&sched->job_scheduled);

	/* Confirm no work left behind accessing device structures */
	cancel_delayed_work_sync(&sched->work_tdr);

	sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);

/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty and
 * jobs from it will not be scheduled further
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* don't change @bad's karma if it's from KERNEL RQ,
	 * because a GPU hang can sometimes corrupt kernel jobs (like VM
	 * updating jobs), but keep in mind that kernel jobs are always
	 * considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);

		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
		     i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (entity->guilty)
						atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma);