/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

/**
 * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
 *
 * Setting this flag on a scheduler fence prevents pipelining of jobs depending
 * on this fence. In other words we always insert a full CPU round trip before
 * dependent jobs are pushed to the hw queue.
 */
#define DRM_SCHED_FENCE_DONT_PIPELINE	DMA_FENCE_FLAG_USER_BITS
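
/*
 * Illustrative sketch only: a driver that needs a guaranteed CPU round trip
 * before dependent jobs run would set this flag on the *scheduled* fence of
 * the job before handing it out as a dependency. Assuming "fence" is a
 * dependency fence the driver is about to publish:
 *
 *	struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
 *
 *	if (s_fence)
 *		set_bit(DRM_SCHED_FENCE_DONT_PIPELINE,
 *			&s_fence->scheduled.flags);
 */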

/**
 * DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT - A fence deadline hint has been set
 *
 * Because a deadline hint can be set before the backing hw fence is created,
 * we need to keep track of whether a deadline has already been set.
 */
#define DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT	(DMA_FENCE_FLAG_USER_BITS + 1)

enum dma_resv_usage;
struct dma_resv;
struct drm_gem_object;

struct drm_gpu_scheduler;
struct drm_sched_rq;

struct drm_file;

/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH,
	DRM_SCHED_PRIORITY_KERNEL,

	DRM_SCHED_PRIORITY_COUNT
};
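
/*
 * Illustrative only: because the enum starts at 0 and ends with
 * DRM_SCHED_PRIORITY_COUNT, it can directly size and index per-priority
 * arrays such as &drm_gpu_scheduler.sched_rq below:
 *
 *	struct drm_sched_rq *rq = &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 */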

/* Used to choose between FIFO and RR job scheduling */
extern int drm_sched_policy;

#define DRM_SCHED_POLICY_RR    0
#define DRM_SCHED_POLICY_FIFO  1

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	/**
	 * @list:
	 *
	 * Used to append this struct to the list of entities in the runqueue
	 * @rq under &drm_sched_rq.entities.
	 *
	 * Protected by &drm_sched_rq.lock of @rq.
	 */
	struct list_head		list;

	/**
	 * @rq:
	 *
	 * Runqueue on which this entity is currently scheduled.
	 *
	 * FIXME: Locking is very unclear for this. Writers are protected by
	 * @rq_lock, but readers are generally lockless and seem to just race
	 * with not even a READ_ONCE.
	 */
	struct drm_sched_rq		*rq;

	/**
	 * @sched_list:
	 *
	 * A list of schedulers (struct drm_gpu_scheduler). Jobs from this entity can
	 * be scheduled on any scheduler on this list.
	 *
	 * This can be modified by calling drm_sched_entity_modify_sched().
	 * Locking is entirely up to the driver, see the above function for more
	 * details.
	 *
	 * This will be set to NULL if &num_sched_list equals 1 and @rq has been
	 * set already.
	 *
	 * FIXME: This means priority changes through
	 * drm_sched_entity_set_priority() will be lost henceforth in this case.
	 */
	struct drm_gpu_scheduler	**sched_list;

	/**
	 * @num_sched_list:
	 *
	 * Number of drm_gpu_schedulers in the @sched_list.
	 */
	unsigned int			num_sched_list;

	/**
	 * @priority:
	 *
	 * Priority of the entity. This can be modified by calling
	 * drm_sched_entity_set_priority(). Protected by &rq_lock.
	 */
	enum drm_sched_priority		priority;

	/**
	 * @rq_lock:
	 *
	 * Lock to modify the runqueue to which this entity belongs.
	 */
	spinlock_t			rq_lock;

	/**
	 * @job_queue: the list of jobs of this entity.
	 */
	struct spsc_queue		job_queue;

	/**
	 * @fence_seq:
	 *
	 * A linearly increasing seqno incremented with each new
	 * &drm_sched_fence which is part of the entity.
	 *
	 * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking,
	 * this doesn't need to be atomic.
	 */
	atomic_t			fence_seq;

	/**
	 * @fence_context:
	 *
	 * A unique context for all the fences which belong to this entity. The
	 * &drm_sched_fence.scheduled uses the fence_context but
	 * &drm_sched_fence.finished uses fence_context + 1.
	 */
	uint64_t			fence_context;

	/**
	 * @dependency:
	 *
	 * The dependency fence of the job which is on the top of the job queue.
	 */
	struct dma_fence		*dependency;

	/**
	 * @cb:
	 *
	 * Callback for the dependency fence above.
	 */
	struct dma_fence_cb		cb;

	/**
	 * @guilty:
	 *
	 * Points to the guilty atomic provided by the driver in
	 * drm_sched_entity_init(). It is set once a job from this entity is
	 * found guilty of causing a hang.
	 */
	atomic_t			*guilty;

	/**
	 * @last_scheduled:
	 *
	 * Points to the finished fence of the last scheduled job. Only written
	 * by the scheduler thread, can be accessed locklessly from
	 * drm_sched_job_arm() iff the queue is empty.
	 */
	struct dma_fence __rcu		*last_scheduled;

	/**
	 * @last_user: last group leader pushing a job into the entity.
	 */
	struct task_struct		*last_user;

	/**
	 * @stopped:
	 *
	 * Marks the entity as removed from rq and destined for
	 * termination. This is set by calling drm_sched_entity_flush() and by
	 * drm_sched_fini().
	 */
	bool				stopped;

	/**
	 * @entity_idle:
	 *
	 * Signals when entity is not in use, used to sequence entity cleanup in
	 * drm_sched_entity_fini().
	 */
	struct completion		entity_idle;

	/**
	 * @oldest_job_waiting:
	 *
	 * Marks earliest job waiting in SW queue
	 */
	ktime_t				oldest_job_waiting;

	/**
	 * @rb_tree_node:
	 *
	 * The node used to insert this entity into time-based priority queue
	 */
	struct rb_node			rb_tree_node;

};
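
/*
 * Typical entity lifecycle, as a hedged sketch (my_sched is a driver-side
 * placeholder): the entity is bound to one or more schedulers at init time,
 * jobs are pushed through it, and it is torn down when the file/context
 * goes away.
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &my_sched };
 *	struct drm_sched_entity entity;
 *
 *	drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
 *			      sched_list, ARRAY_SIZE(sched_list), NULL);
 *	... push jobs with drm_sched_entity_push_job() ...
 *	drm_sched_entity_destroy(&entity);
 */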

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 * @rb_tree_root: root of time-based priority queue of entities for FIFO scheduling
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct drm_gpu_scheduler	*sched;
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
	struct rb_root_cached		rb_tree_root;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence		scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	/**
	 * @deadline: deadline set on &drm_sched_fence.finished which
	 * potentially needs to be propagated to &drm_sched_fence.parent
	 */
	ktime_t				deadline;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence		*parent;
	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler	*sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t			lock;
	/**
	 * @owner: job owner for debugging
	 */
	void				*owner;
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
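
/*
 * As noted above for @finished, the scheduler fences are what a driver
 * should hand out as job out-fences. A hedged sketch, assuming the driver
 * embeds struct drm_sched_job as job->base:
 *
 *	drm_sched_job_arm(&job->base);
 *	fence = dma_fence_get(&job->base.s_fence->finished);
 *	... install fence in a syncobj or sync_file ...
 */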

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @list: a job participates in the "pending" and "done" lists.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @work: Helper to reschedule job kill to a different context.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and
 * should call drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job.
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct list_head		list;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;

	/*
	 * work is used only after finish_cb has been used and will not be
	 * accessed anymore.
	 */
	union {
		struct dma_fence_cb	finish_cb;
		struct work_struct	work;
	};

	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity		*entity;
	struct dma_fence_cb		cb;
	/**
	 * @dependencies:
	 *
	 * Contains the dependencies as struct dma_fence for this job, see
	 * drm_sched_job_add_dependency() and
	 * drm_sched_job_add_implicit_dependencies().
	 */
	struct xarray			dependencies;

	/** @last_dependency: tracks @dependencies as they signal */
	unsigned long			last_dependency;

	/**
	 * @submit_ts:
	 *
	 * When the job was pushed into the entity queue.
	 */
	ktime_t				submit_ts;
};

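/*
 * Putting the pieces together, a minimal submission-flow sketch (my_entity,
 * my_owner and my_gem_obj are placeholders; drivers embed struct
 * drm_sched_job in their own job struct):
 *
 *	ret = drm_sched_job_init(&job->base, &my_entity, my_owner);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_sched_job_add_implicit_dependencies(&job->base,
 *						      my_gem_obj, true);
 *	if (ret) {
 *		drm_sched_job_cleanup(&job->base);
 *		return ret;
 *	}
 *
 *	drm_sched_job_arm(&job->base);
 *	fence = dma_fence_get(&job->base.s_fence->finished);
 *	drm_sched_entity_push_job(&job->base);
 */
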
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return s_job && atomic_inc_return(&s_job->karma) > threshold;
}

enum drm_gpu_sched_stat {
	DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
	DRM_GPU_SCHED_STAT_NOMINAL,
	DRM_GPU_SCHED_STAT_ENODEV,
};

/**
 * struct drm_sched_backend_ops - Define the backend operations
 *	called by the scheduler
 *
 * These functions should be implemented by the driver.
 */
struct drm_sched_backend_ops {
	/**
	 * @prepare_job:
	 *
	 * Called when the scheduler is considering scheduling this job next, to
	 * get another struct dma_fence for this job to block on. Once it
	 * returns NULL, run_job() may be called.
	 *
	 * Can be NULL if no additional preparation of the dependencies is
	 * necessary. Skipped when jobs are killed instead of run.
	 */
	struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job,
					 struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved. This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 *
	 * This method is called in a workqueue context.
	 *
	 * Drivers typically issue a reset to recover from GPU hangs, and this
	 * procedure usually follows the following workflow:
	 *
	 * 1. Stop the scheduler using drm_sched_stop(). This will park the
	 *    scheduler thread and cancel the timeout work, guaranteeing that
	 *    nothing is queued while we reset the hardware queue
	 * 2. Try to gracefully stop non-faulty jobs (optional)
	 * 3. Issue a GPU reset (driver-specific)
	 * 4. Re-submit jobs using drm_sched_resubmit_jobs()
	 * 5. Restart the scheduler using drm_sched_start(). At that point, new
	 *    jobs can be queued, and the scheduler thread is unblocked
	 *
	 * Note that some GPUs have distinct hardware queues but need to reset
	 * the GPU globally, which requires extra synchronization between the
	 * timeout handlers of the different &drm_gpu_scheduler. One way to
	 * achieve this synchronization is to create an ordered workqueue
	 * (using alloc_ordered_workqueue()) at the driver level, and pass this
	 * queue to drm_sched_init(), to guarantee that timeout handlers are
	 * executed sequentially. The above workflow needs to be slightly
	 * adjusted in that case:
	 *
	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop()
	 * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
	 *    the reset (optional)
	 * 3. Issue a GPU reset on all faulty queues (driver-specific)
	 * 4. Re-submit jobs on all schedulers impacted by the reset using
	 *    drm_sched_resubmit_jobs()
	 * 5. Restart all schedulers that were stopped in step #1 using
	 *    drm_sched_start()
	 *
	 * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
	 * and the underlying driver has started or completed recovery.
	 *
	 * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
	 * available, i.e. has been unplugged.
	 */
	enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);
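
	/*
	 * A hedged sketch of the single-queue workflow above, from inside a
	 * driver's timedout_job handler (my_driver_gpu_reset() is a
	 * placeholder for the driver-specific reset in step 3):
	 *
	 *	struct drm_gpu_scheduler *sched = sched_job->sched;
	 *
	 *	drm_sched_stop(sched, sched_job);
	 *	drm_sched_increase_karma(sched_job);
	 *	my_driver_gpu_reset();
	 *	drm_sched_resubmit_jobs(sched);
	 *	drm_sched_start(sched, true);
	 *	return DRM_GPU_SCHED_STAT_NOMINAL;
	 */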

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
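
/*
 * A minimal, hedged ops-table sketch (the my_* handlers are placeholders):
 * a driver wires its submission, timeout and cleanup routines into the
 * scheduler through this table.
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,
 *	};
 */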

/**
 * struct drm_gpu_scheduler - scheduler instance-specific data
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority-wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @timeout_wq: workqueue used to queue @work_tdr
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @pending_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the number of hangs caused by a job crosses this limit,
 *              the job is marked guilty and will no longer be considered for
 *              scheduling.
 * @score: score to help loadbalancer pick an idle sched
 * @_score: score used when the driver doesn't provide one
 * @ready: marks if the underlying HW is ready to work
 * @free_guilty: A hint to the timeout handler to free the guilty job.
 * @dev: system &struct device
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_COUNT];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct workqueue_struct		*timeout_wq;
	struct delayed_work		work_tdr;
	struct task_struct		*thread;
	struct list_head		pending_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t			*score;
	atomic_t			_score;
	bool				ready;
	bool				free_guilty;
	struct device			*dev;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev);
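
/*
 * For illustration, bringing up a scheduler for one ring with the ops
 * sketch above (the numeric values are placeholders): a hw queue depth of
 * 64, a hang limit of 1, a 500 ms timeout, no dedicated timeout workqueue
 * (NULL selects the system workqueue) and no shared score:
 *
 *	ret = drm_sched_init(&my_sched, &my_sched_ops, 64, 1,
 *			     msecs_to_jiffies(500), NULL,
 *			     NULL, "my-ring", dev);
 */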

void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence);
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
					 struct drm_file *file,
					 u32 handle,
					 u32 point);
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage);
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write);
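
/*
 * Illustrative only: pulling the fences of a reservation object into a job
 * between drm_sched_job_init() and drm_sched_job_arm(), here syncing
 * against writers via DMA_RESV_USAGE_WRITE ("obj" is a placeholder GEM
 * object):
 *
 *	ret = drm_sched_job_add_resv_dependencies(&job->base, obj->resv,
 *						  DMA_RESV_USAGE_WRITE);
 */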

void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list);

void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
int drm_sched_entity_error(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_alloc(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_init(struct drm_sched_fence *fence,
			  struct drm_sched_entity *entity);
void drm_sched_fence_free(struct drm_sched_fence *fence);

void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
			       struct dma_fence *parent);
void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list);

#endif