/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

/**
 * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
 *
 * Setting this flag on a scheduler fence prevents pipelining of jobs depending
 * on this fence. In other words we always insert a full CPU round trip before
 * dependent jobs are pushed to the hw queue.
 */
#define DRM_SCHED_FENCE_DONT_PIPELINE	DMA_FENCE_FLAG_USER_BITS
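
/*
 * Example (illustrative sketch, not part of the API): a driver opts out of
 * pipelining for a single submission by setting the bit on the scheduler
 * fence that dependent jobs will wait on, typically the finished fence.
 * "job" here is a hypothetical driver object embedding a
 * &struct drm_sched_job named "base":
 *
 *	set_bit(DRM_SCHED_FENCE_DONT_PIPELINE,
 *		&job->base.s_fence->finished.flags);
 */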

/**
 * DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT - A fence deadline hint has been set
 *
 * Because a deadline hint can be set before the backing hw fence is created,
 * we need to keep track of whether a deadline has already been set.
 */
#define DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT	(DMA_FENCE_FLAG_USER_BITS + 1)

enum dma_resv_usage;
struct dma_resv;
struct drm_gem_object;

struct drm_gpu_scheduler;
struct drm_sched_rq;

struct drm_file;

/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH,
	DRM_SCHED_PRIORITY_KERNEL,

	DRM_SCHED_PRIORITY_COUNT,
	DRM_SCHED_PRIORITY_UNSET = -2
};

/* Used to choose between FIFO and RR job scheduling */
extern int drm_sched_policy;

#define DRM_SCHED_POLICY_RR    0
#define DRM_SCHED_POLICY_FIFO  1
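
/*
 * Example (sketch): because the priority values start at 0, they can index a
 * per-priority array directly, which is how an entity is bound to one of a
 * scheduler's run queues:
 *
 *	struct drm_sched_rq *rq = &sched->sched_rq[entity->priority];
 */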

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	/**
	 * @list:
	 *
	 * Used to append this struct to the list of entities in the runqueue
	 * @rq under &drm_sched_rq.entities.
	 *
	 * Protected by &drm_sched_rq.lock of @rq.
	 */
	struct list_head		list;

	/**
	 * @rq:
	 *
	 * Runqueue on which this entity is currently scheduled.
	 *
	 * FIXME: Locking is very unclear for this. Writers are protected by
	 * @rq_lock, but readers are generally lockless and seem to just race
	 * with not even a READ_ONCE.
	 */
	struct drm_sched_rq		*rq;

	/**
	 * @sched_list:
	 *
	 * A list of schedulers (struct drm_gpu_scheduler). Jobs from this
	 * entity can be scheduled on any scheduler on this list.
	 *
	 * This can be modified by calling drm_sched_entity_modify_sched().
	 * Locking is entirely up to the driver, see the above function for
	 * more details.
	 *
	 * This will be set to NULL if &num_sched_list equals 1 and @rq has
	 * been set already.
	 *
	 * FIXME: This means priority changes through
	 * drm_sched_entity_set_priority() will be lost henceforth in this
	 * case.
	 */
	struct drm_gpu_scheduler	**sched_list;

	/**
	 * @num_sched_list:
	 *
	 * Number of drm_gpu_schedulers in the @sched_list.
	 */
	unsigned int			num_sched_list;

	/**
	 * @priority:
	 *
	 * Priority of the entity. This can be modified by calling
	 * drm_sched_entity_set_priority(). Protected by &rq_lock.
	 */
	enum drm_sched_priority		priority;

	/**
	 * @rq_lock:
	 *
	 * Lock to modify the runqueue to which this entity belongs.
	 */
	spinlock_t			rq_lock;

	/**
	 * @job_queue: the list of jobs of this entity.
	 */
	struct spsc_queue		job_queue;

	/**
	 * @fence_seq:
	 *
	 * A linearly increasing seqno incremented with each new
	 * &drm_sched_fence which is part of the entity.
	 *
	 * FIXME: Callers of drm_sched_job_arm() need to ensure correct
	 * locking, this doesn't need to be atomic.
	 */
	atomic_t			fence_seq;

	/**
	 * @fence_context:
	 *
	 * A unique context for all the fences which belong to this entity. The
	 * &drm_sched_fence.scheduled fence uses the fence_context while
	 * &drm_sched_fence.finished uses fence_context + 1.
	 */
	uint64_t			fence_context;

	/**
	 * @dependency:
	 *
	 * The dependency fence of the job which is on the top of the job
	 * queue.
	 */
	struct dma_fence		*dependency;

	/**
	 * @cb:
	 *
	 * Callback for the dependency fence above.
	 */
	struct dma_fence_cb		cb;

	/**
	 * @guilty:
	 *
	 * Points to the entity's guilty flag, which is set when the entity
	 * is marked guilty after causing a hang.
	 */
	atomic_t			*guilty;

	/**
	 * @last_scheduled:
	 *
	 * Points to the finished fence of the last scheduled job. Only written
	 * by the scheduler thread, can be accessed locklessly from
	 * drm_sched_job_arm() iff the queue is empty.
	 */
	struct dma_fence		*last_scheduled;

	/**
	 * @last_user: last group leader pushing a job into the entity.
	 */
	struct task_struct		*last_user;

	/**
	 * @stopped:
	 *
	 * Marks the entity as removed from rq and destined for
	 * termination. This is set by calling drm_sched_entity_flush() and by
	 * drm_sched_fini().
	 */
	bool				stopped;

	/**
	 * @entity_idle:
	 *
	 * Signals when entity is not in use, used to sequence entity cleanup
	 * in drm_sched_entity_fini().
	 */
	struct completion		entity_idle;

	/**
	 * @oldest_job_waiting:
	 *
	 * Marks earliest job waiting in SW queue
	 */
	ktime_t				oldest_job_waiting;

	/**
	 * @rb_tree_node:
	 *
	 * The node used to insert this entity into the time-based priority
	 * queue
	 */
	struct rb_node			rb_tree_node;

};
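
/*
 * Example (illustrative sketch): a driver typically embeds a
 * &struct drm_sched_entity in a per-context object and initializes it
 * against one or more schedulers; "foo_ctx" and "foo_dev" are hypothetical
 * driver types:
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &foo_dev->sched };
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&foo_ctx->entity,
 *				    DRM_SCHED_PRIORITY_NORMAL,
 *				    sched_list, ARRAY_SIZE(sched_list),
 *				    NULL);
 */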

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 * @rb_tree_root: root of the time-based priority queue of entities
 *                used for FIFO scheduling
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct drm_gpu_scheduler	*sched;
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
	struct rb_root_cached		rb_tree_root;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence		scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	/**
	 * @deadline: deadline set on &drm_sched_fence.finished which
	 * potentially needs to be propagated to &drm_sched_fence.parent
	 */
	ktime_t				deadline;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence		*parent;
	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler	*sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t			lock;
	/**
	 * @owner: job owner for debugging
	 */
	void				*owner;
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
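
/*
 * Example (sketch): as documented for &drm_sched_fence.finished, the finished
 * fence is what a driver should hand out as the job's out-fence, since it is
 * available right after drm_sched_job_init(); "job" is a hypothetical driver
 * object embedding a &struct drm_sched_job named "base":
 *
 *	struct dma_fence *out_fence =
 *		dma_fence_get(&job->base.s_fence->finished);
 */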

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @list: a job participates in the "pending" and "done" lists.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of job.
 * @finish_cb: the callback for the finished fence.
 * @work: Helper to reschedule job kill to different context.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and
 * should call drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job.
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct list_head		list;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;

	/*
	 * work is used only after finish_cb has been used and will not be
	 * accessed anymore.
	 */
	union {
		struct dma_fence_cb	finish_cb;
		struct work_struct	work;
	};

	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity		*entity;
	struct dma_fence_cb		cb;
	/**
	 * @dependencies:
	 *
	 * Contains the dependencies as struct dma_fence for this job, see
	 * drm_sched_job_add_dependency() and
	 * drm_sched_job_add_implicit_dependencies().
	 */
	struct xarray			dependencies;

	/** @last_dependency: tracks @dependencies as they signal */
	unsigned long			last_dependency;

	/**
	 * @submit_ts:
	 *
	 * When the job was pushed into the entity queue.
	 */
	ktime_t				submit_ts;
};
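
/*
 * Example (sketch of the typical submission flow using the functions
 * declared below; "foo_ctx", "job" and "obj" are hypothetical driver
 * objects, with "job" embedding a &struct drm_sched_job named "base"):
 *
 *	ret = drm_sched_job_init(&job->base, &foo_ctx->entity, foo_ctx);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_sched_job_add_implicit_dependencies(&job->base, obj, true);
 *	if (ret) {
 *		drm_sched_job_cleanup(&job->base);
 *		return ret;
 *	}
 *
 *	drm_sched_job_arm(&job->base);
 *	drm_sched_entity_push_job(&job->base);
 */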

/**
 * drm_sched_invalidate_job - check if a job crossed the karma threshold
 * @s_job: job to check, may be NULL
 * @threshold: the scheduler's hang limit
 *
 * Increments the job's karma and returns true if it now exceeds @threshold,
 * i.e. the job should be considered guilty.
 */
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return s_job && atomic_inc_return(&s_job->karma) > threshold;
}

enum drm_gpu_sched_stat {
	DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
	DRM_GPU_SCHED_STAT_NOMINAL,
	DRM_GPU_SCHED_STAT_ENODEV,
};

/**
 * struct drm_sched_backend_ops - Define the backend operations
 *	called by the scheduler
 *
 * These functions should be implemented in the driver side.
 */
struct drm_sched_backend_ops {
	/**
	 * @prepare_job:
	 *
	 * Called when the scheduler is considering scheduling this job next,
	 * to get another struct dma_fence for this job to block on. Once it
	 * returns NULL, run_job() may be called.
	 *
	 * Can be NULL if no additional preparation of the dependencies is
	 * necessary. Skipped when jobs are killed instead of run.
	 */
	struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job,
					 struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved. This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 *
	 * This method is called in a workqueue context.
	 *
	 * Drivers typically issue a reset to recover from GPU hangs, and this
	 * procedure usually follows the following workflow (a minimal handler
	 * following it is sketched after this structure):
	 *
	 * 1. Stop the scheduler using drm_sched_stop(). This will park the
	 *    scheduler thread and cancel the timeout work, guaranteeing that
	 *    nothing is queued while we reset the hardware queue
	 * 2. Try to gracefully stop non-faulty jobs (optional)
	 * 3. Issue a GPU reset (driver-specific)
	 * 4. Re-submit jobs using drm_sched_resubmit_jobs()
	 * 5. Restart the scheduler using drm_sched_start(). At that point,
	 *    new jobs can be queued, and the scheduler thread is unblocked
	 *
	 * Note that some GPUs have distinct hardware queues but need to reset
	 * the GPU globally, which requires extra synchronization between the
	 * timeout handlers of the different &drm_gpu_scheduler instances. One
	 * way to achieve this synchronization is to create an ordered
	 * workqueue (using alloc_ordered_workqueue()) at the driver level, and
	 * pass this queue to drm_sched_init(), to guarantee that timeout
	 * handlers are executed sequentially. The above workflow needs to be
	 * slightly adjusted in that case:
	 *
	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop()
	 * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
	 *    the reset (optional)
	 * 3. Issue a GPU reset on all faulty queues (driver-specific)
	 * 4. Re-submit jobs on all schedulers impacted by the reset using
	 *    drm_sched_resubmit_jobs()
	 * 5. Restart all schedulers that were stopped in step #1 using
	 *    drm_sched_start()
	 *
	 * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
	 * and the underlying driver has started or completed recovery.
	 *
	 * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
	 * available, i.e. has been unplugged.
	 */
	enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
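
/*
 * Example (minimal timeout handler following the single-queue workflow
 * documented for &drm_sched_backend_ops.timedout_job; "foo_gpu_reset" and
 * "sched_to_foo_dev" are hypothetical driver helpers):
 *
 *	static enum drm_gpu_sched_stat
 *	foo_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		struct drm_gpu_scheduler *sched = sched_job->sched;
 *
 *		drm_sched_stop(sched, sched_job);
 *		foo_gpu_reset(sched_to_foo_dev(sched));
 *		drm_sched_resubmit_jobs(sched);
 *		drm_sched_start(sched, true);
 *
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 */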

/**
 * struct drm_gpu_scheduler - scheduler instance-specific data
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority-wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @timeout_wq: workqueue used to queue @work_tdr
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @pending_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the number of hangs caused by a job crosses this limit
 *              then it is marked guilty and it will no longer be considered
 *              for scheduling.
 * @score: score to help loadbalancer pick an idle sched
 * @_score: score used when the driver doesn't provide one
 * @ready: marks if the underlying HW is ready to work
 * @free_guilty: A hint to the timeout handler to free the guilty job.
 * @dev: system &struct device
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_COUNT];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct workqueue_struct		*timeout_wq;
	struct delayed_work		work_tdr;
	struct task_struct		*thread;
	struct list_head		pending_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t			*score;
	atomic_t			_score;
	bool				ready;
	bool				free_guilty;
	struct device			*dev;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev);

void drm_sched_fini(struct drm_gpu_scheduler *sched);
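
/*
 * Example (sketch): one scheduler instance per hardware ring, set up at
 * driver load; "foo_sched_ops" and "foo_dev" are hypothetical, and passing a
 * NULL timeout_wq is assumed to fall back to the system workqueue:
 *
 *	ret = drm_sched_init(&foo_dev->sched, &foo_sched_ops,
 *			     64, 0, msecs_to_jiffies(500),
 *			     NULL, NULL, "foo_ring", foo_dev->dev);
 */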

int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence);
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
					 struct drm_file *file,
					 u32 handle,
					 u32 point);
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage);
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write);


void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list);

void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
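
/*
 * Example (sketch): drm_sched_entity_destroy() is the usual one-stop teardown
 * helper; the drm_sched_entity_flush()/drm_sched_entity_fini() split below is
 * assumed to be for drivers that need to run their own cleanup in between:
 *
 *	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
 *	// driver-specific cleanup goes here
 *	drm_sched_entity_fini(entity);
 */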

struct drm_sched_fence *drm_sched_fence_alloc(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_init(struct drm_sched_fence *fence,
			  struct drm_sched_entity *entity);
void drm_sched_fence_free(struct drm_sched_fence *fence);

void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
			       struct dma_fence *parent);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list);

#endif