/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

struct drm_gpu_scheduler;
struct drm_sched_rq;

enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH_SW,
	DRM_SCHED_PRIORITY_HIGH_HW,
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_MAX,
	DRM_SCHED_PRIORITY_INVALID = -1,
	DRM_SCHED_PRIORITY_UNSET = -2
};

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * @list: used to append this struct to the list of entities in the
 *        runqueue.
 * @rq: runqueue on which this entity is currently scheduled.
 * @rq_list: a list of run queues on which jobs from this entity can
 *           be scheduled.
 * @num_rq_list: number of run queues in the rq_list.
 * @rq_lock: lock to modify the runqueue to which this entity belongs.
 * @job_queue: the list of jobs of this entity.
 * @fence_seq: a linearly increasing seqno incremented with each
 *             new &drm_sched_fence which is part of the entity.
 * @fence_context: a unique context for all the fences which belong
 *                 to this entity.
 *                 The &drm_sched_fence.scheduled uses the
 *                 fence_context but &drm_sched_fence.finished uses
 *                 fence_context + 1.
 * @dependency: the dependency fence of the job which is on the top
 *              of the job queue.
 * @cb: callback for the dependency fence above.
 * @guilty: points to ctx's guilty.
 * @last_scheduled: points to the finished fence of the last scheduled job.
 * @last_user: last group leader pushing a job into the entity.
 * @stopped: Marks the entity as removed from rq and destined for termination.
 * @entity_idle: Signals when the entity is not in use.
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy. See the initialization sketch after this struct.
 */
struct drm_sched_entity {
	struct list_head		list;
	struct drm_sched_rq		*rq;
	struct drm_sched_rq		**rq_list;
	unsigned int			num_rq_list;
	spinlock_t			rq_lock;

	struct spsc_queue		job_queue;

	atomic_t			fence_seq;
	uint64_t			fence_context;

	struct dma_fence		*dependency;
	struct dma_fence_cb		cb;
	atomic_t			*guilty;
	struct dma_fence		*last_scheduled;
	struct task_struct		*last_user;
	bool				stopped;
	struct completion		entity_idle;
};
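
/*
 * Example: minimal driver-side entity setup, built from the prototypes
 * declared later in this header (illustrative sketch; "sched" is an
 * assumed pointer to an already initialized &drm_gpu_scheduler). An
 * entity is typically embedded in a per-file context and fed by one or
 * more run queues picked from the scheduler's @sched_rq array:
 *
 *	struct drm_sched_rq *rq;
 *	struct drm_sched_entity entity;
 *	int ret;
 *
 *	rq = &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 *	ret = drm_sched_entity_init(&entity, &rq, 1, NULL);
 *	if (ret)
 *		return ret;
 */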

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct drm_gpu_scheduler	*sched;
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
};
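
/*
 * Example: a minimal sketch of the entity selection described above,
 * simplified from the in-kernel logic (the real policy resumes the
 * round-robin scan from @current_entity; that bookkeeping is elided
 * here for brevity):
 *
 *	struct drm_sched_entity *entity;
 *
 *	spin_lock(&rq->lock);
 *	list_for_each_entry(entity, &rq->entities, list) {
 *		if (drm_sched_entity_is_ready(entity)) {
 *			rq->current_entity = entity;
 *			spin_unlock(&rq->lock);
 *			return entity;
 *		}
 *	}
 *	spin_unlock(&rq->lock);
 *	return NULL;
 */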

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence		scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence		*parent;
	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler	*sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t			lock;
	/**
	 * @owner: job owner for debugging.
	 */
	void				*owner;
};
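
/*
 * Example: exposing &drm_sched_fence.finished as the job's out fence,
 * as the @finished documentation above recommends (illustrative
 * sketch; "job" and "syncobj" are assumed to exist in the caller, and
 * drm_syncobj_replace_fence() takes its own fence reference):
 *
 *	drm_syncobj_replace_fence(syncobj, &job->s_fence->finished);
 */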

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of job.
 * @finish_cb: the callback for the finished fence.
 * @node: used to append this struct to the &drm_gpu_scheduler.ring_mirror_list.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(); the driver
 * should then call drm_sched_entity_push_job() once it wants the
 * scheduler to schedule the job (see the example below).
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;
	struct dma_fence_cb		finish_cb;
	struct list_head		node;
	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity		*entity;
	struct dma_fence_cb		cb;
};
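
/*
 * Example: the typical submission flow described above (illustrative
 * sketch; a driver job struct embedding a &drm_sched_job as "base",
 * plus the "entity" and "owner" pointers, are assumed):
 *
 *	ret = drm_sched_job_init(&job->base, entity, owner);
 *	if (ret)
 *		return ret;
 *
 *	... write the command stream, take buffer references ...
 *
 *	drm_sched_entity_push_job(&job->base, entity);
 */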

/**
 * drm_sched_invalidate_job - check if a job has exceeded a karma threshold
 * @s_job: the job to check
 * @threshold: the hang limit to compare the job's karma against
 *
 * Increments the job's karma and returns true if it now exceeds @threshold.
 */
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}
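
/*
 * Example: a driver's recovery path might use this to cancel guilty
 * jobs before re-submission after a reset (illustrative sketch; how
 * the driver walks its pending jobs is up to the driver):
 *
 *	if (drm_sched_invalidate_job(s_job, sched->hang_limit))
 *		dma_fence_set_error(&s_job->s_fence->finished, -ECANCELED);
 */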

/**
 * struct drm_sched_backend_ops
 *
 * Define the backend operations called by the scheduler;
 * these functions should be implemented by the driver. A skeleton
 * implementation is sketched after this struct.
 */
struct drm_sched_backend_ops {
	/**
	 * @dependency: Called when the scheduler is considering scheduling
	 * this job next, to get another struct dma_fence for this job to
	 * block on.  Once it returns NULL, run_job() may be called.
	 */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved.  This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 */
	void (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
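
/*
 * Example: a skeleton driver implementation (illustrative sketch; the
 * drv_* callbacks are hypothetical and would wrap the driver's real
 * dependency tracking, ring submission, reset, and cleanup code):
 *
 *	static const struct drm_sched_backend_ops drv_sched_ops = {
 *		.dependency = drv_job_dependency,
 *		.run_job = drv_job_run,
 *		.timedout_job = drv_job_timedout,
 *		.free_job = drv_job_free,
 *	};
 */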

/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once drm_sched_entity_do_release() is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @work_tdr: schedules a delayed call to drm_sched_job_timedout() after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @ring_mirror_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the ring_mirror_list.
 * @hang_limit: once the number of hangs caused by a job crosses this limit
 *              the job is marked guilty and will not be scheduled further.
 * @num_jobs: the number of jobs queued in the scheduler.
 * @ready: marks if the underlying HW is ready to work.
 * @free_guilty: a hint to the timeout handler to free the guilty job.
 *
 * One scheduler is implemented for each hardware ring; see the
 * initialization example after drm_sched_init() below.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_MAX];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct delayed_work		work_tdr;
	struct task_struct		*thread;
	struct list_head		ring_mirror_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t			num_jobs;
	bool				ready;
	bool				free_guilty;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit, long timeout,
		   const char *name);
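
/*
 * Example: bringing up one scheduler per ring (illustrative sketch;
 * drv_sched_ops and the numeric limits are hypothetical driver
 * choices, not values mandated by the scheduler):
 *
 *	ret = drm_sched_init(&ring->sched, &drv_sched_ops,
 *			     64, 2, msecs_to_jiffies(500),
 *			     "drv-ring0");
 *	if (ret)
 *		return ret;
 */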

void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);
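
/*
 * Example: the usual recovery sequence from a timedout_job() handler
 * (illustrative sketch; drv_reset_hw() is a hypothetical placeholder
 * for the driver's actual engine reset):
 *
 *	drm_sched_stop(sched, bad_job);
 *	drm_sched_increase_karma(bad_job);
 *	drv_reset_hw();
 *	drm_sched_resubmit_jobs(sched);
 *	drm_sched_start(sched, true);
 */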

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  struct drm_sched_rq **rq_list,
			  unsigned int num_rq_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
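
/*
 * Example: tearing down an entity when the owning context goes away
 * (illustrative sketch; "ctx" is a hypothetical per-file context).
 * drm_sched_entity_destroy() flushes pending jobs, waiting up to
 * MAX_WAIT_SCHED_ENTITY_Q_EMPTY, and then finalizes the entity;
 * callers needing separate steps can use drm_sched_entity_flush()
 * followed by drm_sched_entity_fini() instead:
 *
 *	drm_sched_entity_destroy(&ctx->entity);
 */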

struct drm_sched_fence *drm_sched_fence_create(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);

#endif
339