/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

struct drm_gpu_scheduler;
struct drm_sched_rq;

enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH_SW,
	DRM_SCHED_PRIORITY_HIGH_HW,
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_MAX,

	DRM_SCHED_PRIORITY_INVALID = -1,
	DRM_SCHED_PRIORITY_UNSET = -2
};
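/*
 * A minimal sketch of how a driver might map a userspace context priority
 * onto enum drm_sched_priority before setting up an entity.  The
 * MY_CTX_PRIORITY_* values and my_ctx_to_sched_priority() are hypothetical
 * driver-side names, not part of this header:
 *
 *	static enum drm_sched_priority
 *	my_ctx_to_sched_priority(int ctx_priority)
 *	{
 *		switch (ctx_priority) {
 *		case MY_CTX_PRIORITY_LOW:
 *			return DRM_SCHED_PRIORITY_MIN;
 *		case MY_CTX_PRIORITY_HIGH:
 *			return DRM_SCHED_PRIORITY_HIGH_SW;
 *		case MY_CTX_PRIORITY_NORMAL:
 *		default:
 *			return DRM_SCHED_PRIORITY_NORMAL;
 *		}
 *	}
 */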
/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * @list: used to append this struct to the list of entities in the
 *        runqueue.
 * @rq: runqueue on which this entity is currently scheduled.
 * @sched_list: A list of schedulers (drm_gpu_schedulers).
 *              Jobs from this entity can be scheduled on any scheduler
 *              on this list.
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
 * @priority: priority of the entity.
 * @rq_lock: lock to modify the runqueue to which this entity belongs.
 * @job_queue: the list of jobs of this entity.
 * @fence_seq: a linearly increasing seqno incremented with each
 *             new &drm_sched_fence which is part of the entity.
 * @fence_context: a unique context for all the fences which belong
 *                 to this entity.
 *                 The &drm_sched_fence.scheduled uses the
 *                 fence_context but &drm_sched_fence.finished uses
 *                 fence_context + 1.
 * @dependency: the dependency fence of the job which is on the top
 *              of the job queue.
 * @cb: callback for the dependency fence above.
 * @guilty: points to the context's guilty flag.
 * @last_scheduled: points to the finished fence of the last scheduled job.
 * @last_user: last group leader pushing a job into the entity.
 * @stopped: Marks the entity as removed from rq and destined for termination.
 * @entity_idle: Signals when the entity is not in use.
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	struct list_head list;
	struct drm_sched_rq *rq;
	struct drm_gpu_scheduler **sched_list;
	unsigned int num_sched_list;
	enum drm_sched_priority priority;
	spinlock_t rq_lock;

	struct spsc_queue job_queue;

	atomic_t fence_seq;
	uint64_t fence_context;

	struct dma_fence *dependency;
	struct dma_fence_cb cb;
	atomic_t *guilty;
	struct dma_fence *last_scheduled;
	struct task_struct *last_user;
	bool stopped;
	struct completion entity_idle;
};

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t lock;
	struct drm_gpu_scheduler *sched;
	struct list_head entities;
	struct drm_sched_entity *current_entity;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence finished;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence *parent;
	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler *sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t lock;
	/**
	 * @owner: job owner for debugging
	 */
	void *owner;
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
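/*
 * As the &drm_sched_fence.finished documentation above suggests, a driver
 * can expose that fence as the job's out fence as soon as
 * drm_sched_job_init() has succeeded.  A minimal sketch; struct my_job and
 * its @base member are hypothetical driver-side names:
 *
 *	struct my_job {
 *		struct drm_sched_job base;
 *	};
 *
 *	int ret = drm_sched_job_init(&job->base, entity, owner);
 *	if (ret)
 *		return ret;
 *
 *	// finished is available immediately; the hardware fence returned
 *	// by run_job() does not exist yet at this point.
 *	out_fence = dma_fence_get(&job->base.s_fence->finished);
 *
 *	drm_sched_entity_push_job(&job->base, entity);
 */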
/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of job.
 * @finish_cb: the callback for the finished fence.
 * @node: used to append this struct to the &drm_gpu_scheduler.ring_mirror_list.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and the
 * driver should call drm_sched_entity_push_job() once it wants the
 * scheduler to schedule the job.
 */
struct drm_sched_job {
	struct spsc_node queue_node;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_fence *s_fence;
	struct dma_fence_cb finish_cb;
	struct list_head node;
	uint64_t id;
	atomic_t karma;
	enum drm_sched_priority s_priority;
	struct drm_sched_entity *entity;
	struct dma_fence_cb cb;
};

/**
 * drm_sched_invalidate_job - check if a job has exceeded its hang threshold
 *
 * @s_job: the job to charge with another hang
 * @threshold: the scheduler's hang limit
 *
 * Increments the job's karma and returns true once it crosses @threshold,
 * at which point the job should be considered guilty.
 */
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}

/**
 * struct drm_sched_backend_ops
 *
 * Define the backend operations called by the scheduler.
 * These functions should be implemented on the driver side.
 */
struct drm_sched_backend_ops {
	/**
	 * @dependency: Called when the scheduler is considering scheduling
	 * this job next, to get another struct dma_fence for this job to
	 * block on. Once it returns NULL, run_job() may be called.
	 */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved. This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_resubmit_jobs()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 */
	void (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
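/*
 * A minimal sketch of a hypothetical driver filling in these hooks; all
 * my_* names are illustrative and not part of this header:
 *
 *	static struct dma_fence *
 *	my_dependency(struct drm_sched_job *sched_job,
 *		      struct drm_sched_entity *s_entity)
 *	{
 *		return NULL;	// no further fences to wait on
 *	}
 *
 *	static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
 *	{
 *		struct my_job *job = container_of(sched_job, struct my_job, base);
 *
 *		// Returns the hardware fence, signaled on completion.
 *		return my_ring_submit(job);
 *	}
 *
 *	static void my_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		my_gpu_recover(sched_job->sched);
 *	}
 *
 *	static void my_free_job(struct drm_sched_job *sched_job)
 *	{
 *		drm_sched_job_cleanup(sched_job);
 *		kfree(container_of(sched_job, struct my_job, base));
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.dependency   = my_dependency,
 *		.run_job      = my_run_job,
 *		.timedout_job = my_timedout_job,
 *		.free_job     = my_free_job,
 *	};
 */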
/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @work_tdr: schedules a delayed call to drm_sched_job_timedout() after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @ring_mirror_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the ring_mirror_list.
 * @hang_limit: once the hangs by a job cross this limit then it is marked
 *              guilty and will no longer be considered for scheduling.
 * @score: score to help the load balancer pick an idle scheduler.
 * @ready: marks if the underlying HW is ready to work.
 * @free_guilty: A hint to the timeout handler to free the guilty job.
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops *ops;
	uint32_t hw_submission_limit;
	long timeout;
	const char *name;
	struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX];
	wait_queue_head_t wake_up_worker;
	wait_queue_head_t job_scheduled;
	atomic_t hw_rq_count;
	atomic64_t job_id_count;
	struct delayed_work work_tdr;
	struct task_struct *thread;
	struct list_head ring_mirror_list;
	spinlock_t job_list_lock;
	int hang_limit;
	atomic_t score;
	bool ready;
	bool free_guilty;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit, long timeout,
		   const char *name);

void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list);

void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_create(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list);
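/*
 * Putting the pieces together, a hedged sketch of the typical driver flow
 * through this API (my_sched_ops and struct my_job are the hypothetical
 * names used in the sketches above):
 *
 *	struct drm_gpu_scheduler sched;
 *	struct drm_sched_entity entity;
 *	struct drm_gpu_scheduler *sched_list[] = { &sched };
 *
 *	// At driver load: one scheduler per hardware ring.
 *	// hw_submission = 16, hang_limit = 2, timeout = 500 ms.
 *	drm_sched_init(&sched, &my_sched_ops, 16, 2,
 *		       msecs_to_jiffies(500), "my-ring");
 *
 *	// Per context (e.g. at open() time): one entity per ring.
 *	drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
 *			      sched_list, 1, NULL);
 *
 *	// Per submission: init the job, then hand it to the entity.
 *	drm_sched_job_init(&job->base, &entity, owner);
 *	drm_sched_entity_push_job(&job->base, &entity);
 *
 *	// Teardown, in reverse order.
 *	drm_sched_entity_destroy(&entity);
 *	drm_sched_fini(&sched);
 */

#endif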