/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/xarray.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

struct drm_gem_object;

struct drm_gpu_scheduler;
struct drm_sched_rq;

/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH,
	DRM_SCHED_PRIORITY_KERNEL,

	DRM_SCHED_PRIORITY_COUNT,
	DRM_SCHED_PRIORITY_UNSET = -2
};

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * @list: used to append this struct to the list of entities in the
 *        runqueue.
 * @rq: runqueue on which this entity is currently scheduled.
 * @sched_list: A list of schedulers (drm_gpu_schedulers).
 *              Jobs from this entity can be scheduled on any scheduler
 *              on this list.
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
 * @priority: priority of the entity.
 * @rq_lock: lock to modify the runqueue to which this entity belongs.
 * @job_queue: the list of jobs of this entity.
 * @fence_seq: a linearly increasing seqno incremented with each
 *             new &drm_sched_fence which is part of the entity.
 * @fence_context: a unique context for all the fences which belong
 *                 to this entity. The &drm_sched_fence.scheduled fence
 *                 uses fence_context and &drm_sched_fence.finished uses
 *                 fence_context + 1.
 * @dependency: the dependency fence of the job which is on the top
 *              of the job queue.
 * @cb: callback for the dependency fence above.
 * @guilty: points to the context's guilty flag.
 * @last_scheduled: points to the finished fence of the last scheduled job.
 * @last_user: last group leader pushing a job into the entity.
 * @stopped: Marks the entity as removed from the rq and destined for
 *           termination.
 * @entity_idle: signals when the entity is not in use.
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	struct list_head		list;
	struct drm_sched_rq		*rq;
	struct drm_gpu_scheduler	**sched_list;
	unsigned int			num_sched_list;
	enum drm_sched_priority		priority;
	spinlock_t			rq_lock;

	struct spsc_queue		job_queue;

	atomic_t			fence_seq;
	uint64_t			fence_context;

	struct dma_fence		*dependency;
	struct dma_fence_cb		cb;
	atomic_t			*guilty;
	struct dma_fence		*last_scheduled;
	struct task_struct		*last_user;
	bool				stopped;
	struct completion		entity_idle;
};
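
/*
 * Usage sketch: a driver typically embeds a &struct drm_sched_entity in a
 * per-file or per-context object and initializes it against the
 * scheduler(s) it may run on. This is an illustrative sketch only, not
 * part of this API; the "my_*" names are hypothetical driver code.
 *
 *	struct my_ctx {
 *		struct drm_sched_entity entity;
 *	};
 *
 *	static int my_ctx_init(struct my_ctx *ctx,
 *			       struct drm_gpu_scheduler *sched)
 *	{
 *		struct drm_gpu_scheduler *sched_list[] = { sched };
 *
 *		return drm_sched_entity_init(&ctx->entity,
 *					     DRM_SCHED_PRIORITY_NORMAL,
 *					     sched_list,
 *					     ARRAY_SIZE(sched_list),
 *					     NULL);
 *	}
 *
 * Passing NULL as the last argument opts out of guilty-context tracking.
 */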

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct drm_gpu_scheduler	*sched;
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence		scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence		*parent;
	/**
	 * @sched: the scheduler instance to which the job having this
	 * struct belongs.
	 */
	struct drm_gpu_scheduler	*sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t			lock;
	/**
	 * @owner: job owner for debugging
	 */
	void				*owner;
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @list: used to chain this job on the scheduler's pending list.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and the
 * driver should call drm_sched_entity_push_job() once it wants the
 * scheduler to schedule the job.
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct list_head		list;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;
	struct dma_fence_cb		finish_cb;
	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity		*entity;
	struct dma_fence_cb		cb;
	/**
	 * @dependencies:
	 *
	 * Contains the dependencies as struct dma_fence for this job, see
	 * drm_sched_job_add_dependency() and
	 * drm_sched_job_add_implicit_dependencies().
	 */
	struct xarray			dependencies;

	/** @last_dependency: tracks @dependencies as they signal */
	unsigned long			last_dependency;
};
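
/*
 * Submission sketch: the typical lifecycle described above, in order:
 * drm_sched_job_init(), dependency collection, drm_sched_job_arm(), and
 * finally drm_sched_entity_push_job(). Illustrative only; the "my_*"
 * names are hypothetical driver code.
 *
 *	struct my_job {
 *		struct drm_sched_job base;
 *	};
 *
 *	static int my_submit(struct my_job *job,
 *			     struct drm_sched_entity *entity,
 *			     struct drm_gem_object *obj, bool write)
 *	{
 *		int ret;
 *
 *		ret = drm_sched_job_init(&job->base, entity, NULL);
 *		if (ret)
 *			return ret;
 *
 *		ret = drm_sched_job_add_implicit_dependencies(&job->base,
 *							      obj, write);
 *		if (ret) {
 *			drm_sched_job_cleanup(&job->base);
 *			return ret;
 *		}
 *
 *		drm_sched_job_arm(&job->base);
 *		drm_sched_entity_push_job(&job->base, entity);
 *		return 0;
 *	}
 *
 * After the push, ownership of the job passes to the scheduler; it is
 * eventually released through &drm_sched_backend_ops.free_job.
 */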

static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return s_job && atomic_inc_return(&s_job->karma) > threshold;
}

enum drm_gpu_sched_stat {
	DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
	DRM_GPU_SCHED_STAT_NOMINAL,
	DRM_GPU_SCHED_STAT_ENODEV,
};

/**
 * struct drm_sched_backend_ops
 *
 * Define the backend operations called by the scheduler,
 * these functions should be implemented by the driver.
 */
struct drm_sched_backend_ops {
	/**
	 * @dependency:
	 *
	 * Called when the scheduler is considering scheduling this job next,
	 * to get another struct dma_fence for this job to block on. Once it
	 * returns NULL, run_job() may be called.
	 *
	 * If a driver exclusively uses drm_sched_job_add_dependency() and
	 * drm_sched_job_add_implicit_dependencies() this can be omitted and
	 * left as NULL.
	 */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved. This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 *
	 * This method is called in a workqueue context.
	 *
	 * Drivers typically issue a reset to recover from GPU hangs, and this
	 * procedure usually follows the following workflow:
	 *
	 * 1. Stop the scheduler using drm_sched_stop(). This will park the
	 *    scheduler thread and cancel the timeout work, guaranteeing that
	 *    nothing is queued while we reset the hardware queue.
	 * 2. Try to gracefully stop non-faulty jobs (optional).
	 * 3. Issue a GPU reset (driver-specific).
	 * 4. Re-submit jobs using drm_sched_resubmit_jobs().
	 * 5. Restart the scheduler using drm_sched_start(). At that point, new
	 *    jobs can be queued, and the scheduler thread is unblocked.
	 *
	 * Note that some GPUs have distinct hardware queues but need to reset
	 * the GPU globally, which requires extra synchronization between the
	 * timeout handlers of the different &drm_gpu_scheduler instances. One
	 * way to achieve this synchronization is to create an ordered
	 * workqueue (using alloc_ordered_workqueue()) at the driver level, and
	 * pass this queue to drm_sched_init(), to guarantee that timeout
	 * handlers are executed sequentially. The above workflow needs to be
	 * slightly adjusted in that case:
	 *
	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop().
	 * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
	 *    the reset (optional).
	 * 3. Issue a GPU reset on all faulty queues (driver-specific).
	 * 4. Re-submit jobs on all schedulers impacted by the reset using
	 *    drm_sched_resubmit_jobs().
	 * 5. Restart all schedulers that were stopped in step #1 using
	 *    drm_sched_start().
	 *
	 * Return DRM_GPU_SCHED_STAT_NOMINAL when all is normal and the
	 * underlying driver has started or completed recovery.
	 *
	 * Return DRM_GPU_SCHED_STAT_ENODEV if the device is no longer
	 * available, i.e. has been unplugged.
	 */
	enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
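
/*
 * Implementation sketch: minimal backend ops for a hypothetical driver,
 * with a timeout handler that follows the single-scheduler recovery
 * workflow documented in &drm_sched_backend_ops.timedout_job above.
 * Illustrative only; the "my_*" names and helpers are assumptions, and
 * error handling is elided.
 *
 *	static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
 *	{
 *		return my_hw_submit(to_my_job(sched_job));
 *	}
 *
 *	static enum drm_gpu_sched_stat
 *	my_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		struct drm_gpu_scheduler *sched = sched_job->sched;
 *
 *		drm_sched_stop(sched, sched_job);
 *		my_hw_reset(sched);
 *		drm_sched_resubmit_jobs(sched);
 *		drm_sched_start(sched, true);
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 *
 *	static void my_free_job(struct drm_sched_job *sched_job)
 *	{
 *		drm_sched_job_cleanup(sched_job);
 *		kfree(to_my_job(sched_job));
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,
 *	};
 *
 * Here my_hw_submit() would return the hardware fence that later signals
 * &drm_sched_fence.finished, and my_hw_reset() stands in for the
 * driver-specific reset in step 3 of the workflow.
 */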

/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority-wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @timeout_wq: workqueue used to queue @work_tdr.
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @pending_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the hangs by a job cross this limit then it is marked
 *              guilty and it will no longer be considered for scheduling.
 * @score: score to help the load balancer pick an idle scheduler.
 * @_score: score used when the driver doesn't provide one.
 * @ready: marks if the underlying HW is ready to work.
 * @free_guilty: A hint to the timeout handler to free the guilty job.
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_COUNT];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct workqueue_struct		*timeout_wq;
	struct delayed_work		work_tdr;
	struct task_struct		*thread;
	struct list_head		pending_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t			*score;
	atomic_t			_score;
	bool				ready;
	bool				free_guilty;
};
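
/*
 * Init sketch: one scheduler instance per hardware ring, set up at driver
 * load. After &ops, drm_sched_init() takes the hardware queue depth, the
 * hang limit, the job timeout, an optional ordered timeout workqueue
 * (NULL selects the system workqueue), an optional shared score (NULL
 * lets the scheduler use its own), and the ring name. Illustrative only;
 * the "my_*" names and the chosen values are assumptions.
 *
 *	static int my_ring_init(struct my_ring *ring)
 *	{
 *		return drm_sched_init(&ring->sched, &my_sched_ops,
 *				      64, 0, msecs_to_jiffies(500),
 *				      NULL, NULL, ring->name);
 *	}
 *
 * The matching teardown at driver unload is drm_sched_fini(&ring->sched).
 */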

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name);

void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence);
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write);

void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list);

void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
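
/*
 * Teardown sketch: on file close, a driver lets queued jobs drain before
 * freeing the entity. drm_sched_entity_destroy() combines
 * drm_sched_entity_flush() and drm_sched_entity_fini(); the split calls
 * are for drivers that need to do work in between. Illustrative only;
 * the "my_*" names are hypothetical.
 *
 *	static void my_ctx_fini(struct my_ctx *ctx)
 *	{
 *		drm_sched_entity_destroy(&ctx->entity);
 *	}
 */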

struct drm_sched_fence *drm_sched_fence_alloc(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_init(struct drm_sched_fence *fence,
			  struct drm_sched_entity *entity);
void drm_sched_fence_free(struct rcu_head *rcu);

void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list);

#endif