/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>

struct drm_gpu_scheduler;
struct drm_sched_rq;

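/*
 * Scheduling priorities. The scheduler keeps one run queue per priority
 * level (see drm_gpu_scheduler.sched_rq below) and, in the current
 * implementation, serves higher-priority run queues first.
 */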
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH_SW,
	DRM_SCHED_PRIORITY_HIGH_HW,
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_MAX,
	DRM_SCHED_PRIORITY_INVALID = -1,
	DRM_SCHED_PRIORITY_UNSET = -2
};

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * Entities submit jobs, in order, to their corresponding hardware ring,
 * and the scheduler alternates between entities according to its
 * scheduling policy.
 */
struct drm_sched_entity {
	struct list_head		list;
	struct drm_sched_rq		*rq;
	spinlock_t			rq_lock;
	struct drm_gpu_scheduler	*sched;

	struct spsc_queue		job_queue;

	atomic_t			fence_seq;
	uint64_t			fence_context;

	struct dma_fence		*dependency;
	struct dma_fence_cb		cb;
	atomic_t			*guilty; /* points to the context's guilty flag */
	int				fini_status;
	struct dma_fence		*last_scheduled;
};
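/*
 * Illustrative sketch (hypothetical driver code, not part of this header):
 * a driver typically creates one entity per context and ring, e.g.
 *
 *	r = drm_sched_entity_init(&ring->sched, &ctx->entity,
 *				  &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
 *				  NULL);
 *
 * and later tears it down with drm_sched_entity_fini(). Here 'ring' and
 * 'ctx' are assumed driver objects owning a drm_gpu_scheduler and the
 * entity, respectively.
 */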

/**
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
};
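/*
 * Rough sketch of the policy described above as implemented in the
 * scheduler core (simplified; locking, the resume-after-current_entity
 * round-robin detail and the real readiness test are omitted):
 *
 *	list_for_each_entry(entity, &rq->entities, list) {
 *		if (entity_has_a_ready_job(entity)) {
 *			rq->current_entity = entity;
 *			return entity;
 *		}
 *	}
 *
 * 'entity_has_a_ready_job()' is a stand-in for the internal readiness check.
 */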

struct drm_sched_fence {
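	/* This fence is what will be signaled by the scheduler when the
	 * job is picked from the entity's queue and handed to the hardware
	 * (assumed semantics, mirroring the 'finished' fence below).
	 */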
	struct dma_fence		scheduled;

	/* This fence is what will be signaled by the scheduler when the
	 * job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	struct dma_fence_cb		cb;
	struct dma_fence		*parent;
	struct drm_gpu_scheduler	*sched;
	spinlock_t			lock;
	void				*owner;
};
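/*
 * Illustrative sketch of the "out fence" advice above (hypothetical driver
 * code): after drm_sched_job_init() the finished fence can be handed out
 * immediately, e.g.
 *
 *	struct dma_fence *out_fence =
 *		dma_fence_get(&job->s_fence->finished);
 *
 * rather than waiting for the fence returned later from run_job().
 */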

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * A job is created by the driver using drm_sched_job_init(); the driver
 * then calls drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job.
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;
	struct dma_fence_cb		finish_cb;
	struct work_struct		finish_work;
	struct list_head		node;
	struct delayed_work		work_tdr;
	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity		*entity;
};
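/*
 * Illustrative sketch (hypothetical driver code, not part of this header)
 * of the submission flow described above, where 'job' embeds a
 * struct drm_sched_job as 'base':
 *
 *	r = drm_sched_job_init(&job->base, sched, entity, owner);
 *	if (r)
 *		return r;
 *	...
 *	drm_sched_entity_push_job(&job->base, entity);
 */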
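/**
 * drm_sched_invalidate_job - charge a hang against a job
 * @s_job: the job to charge
 * @threshold: hang count above which the job is considered guilty
 *
 * Increments the job's karma counter and returns true once it exceeds
 * @threshold (typically the scheduler's hang_limit), meaning the job has
 * been involved in too many hangs and should be treated as guilty during
 * recovery.
 */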
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}

/**
 * struct drm_sched_backend_ops - the backend operations called by the
 * scheduler. These functions must be implemented by the driver.
 */
struct drm_sched_backend_ops {
	/* Called when the scheduler is considering scheduling this
	 * job next, to get another struct dma_fence for this job to
	 * block on.  Once it returns NULL, run_job() may be called.
	 */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);

	/* Called to execute the job once all of the dependencies have
	 * been resolved.  This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/* Called when a job has taken too long to execute, to trigger
	 * GPU recovery.
	 */
	void (*timedout_job)(struct drm_sched_job *sched_job);

	/* Called once the job's finished fence has been signaled and
	 * it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
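/*
 * Illustrative sketch (hypothetical driver code): a driver provides its
 * callbacks in a static ops table and passes it to drm_sched_init(), e.g.
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.dependency	= my_job_dependency,
 *		.run_job	= my_job_run,
 *		.timedout_job	= my_job_timedout,
 *		.free_job	= my_job_free,
 *	};
 *
 * where the my_job_* functions are driver-specific implementations of the
 * hooks above.
 */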

/**
 * struct drm_gpu_scheduler - one scheduler is implemented for each
 * hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_MAX];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct task_struct		*thread;
	struct list_head		ring_mirror_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit, long timeout,
		   const char *name);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
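/*
 * Illustrative sketch (hypothetical driver code): one scheduler is set up
 * per hardware ring, e.g.
 *
 *	r = drm_sched_init(&ring->sched, &my_sched_ops,
 *			   num_hw_submission, hang_limit,
 *			   msecs_to_jiffies(timeout_ms), ring->name);
 *
 * and torn down with drm_sched_fini(&ring->sched) when the ring goes away.
 * 'ring', 'my_sched_ops' and the limit/timeout values are driver-specific
 * assumptions.
 */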

int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
			  struct drm_sched_entity *entity,
			  struct drm_sched_rq *rq,
			  atomic_t *guilty);
void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
			   struct drm_sched_entity *entity);
void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
			   struct drm_sched_entity *entity);
void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
			   struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity);
void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
			     struct drm_sched_rq *rq);

struct drm_sched_fence *drm_sched_fence_create(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_gpu_scheduler *sched,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
			    struct drm_sched_job *job);
void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

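/*
 * Illustrative sketch (hypothetical driver code) of the recovery flow built
 * from the functions above: on a hang the driver typically stops the
 * scheduler, resets the hardware and then resubmits pending jobs, roughly
 *
 *	drm_sched_hw_job_reset(&ring->sched, bad_job);
 *	my_hw_reset(ring);		// hypothetical driver GPU reset
 *	drm_sched_job_recovery(&ring->sched);
 *
 * Whether the offending job is resubmitted or kicked out (see
 * drm_sched_job_kickout()) is a driver policy decision.
 */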
#endif