/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

/**
 * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
 *
 * Setting this flag on a scheduler fence prevents pipelining of jobs depending
 * on this fence. In other words we always insert a full CPU round trip before
 * dependent jobs are pushed to the hw queue.
 */
#define DRM_SCHED_FENCE_DONT_PIPELINE	DMA_FENCE_FLAG_USER_BITS

enum dma_resv_usage;
struct dma_resv;
struct drm_gem_object;

struct drm_gpu_scheduler;
struct drm_sched_rq;

struct drm_file;

/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH,
	DRM_SCHED_PRIORITY_KERNEL,

	DRM_SCHED_PRIORITY_COUNT,
	DRM_SCHED_PRIORITY_UNSET = -2
};

/* Used to choose between FIFO and RR job scheduling */
extern int drm_sched_policy;

#define DRM_SCHED_POLICY_RR    0
#define DRM_SCHED_POLICY_FIFO  1

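/*
 * Illustrative sketch (not part of the original header): a driver that wants
 * to force a full CPU round trip before dependent jobs reach the hardware
 * queue can set DRM_SCHED_FENCE_DONT_PIPELINE on the scheduler fence that
 * those jobs will depend on. The job variable below is hypothetical.
 *
 *	struct dma_fence *f = &job->s_fence->finished;
 *
 *	set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &f->flags);
 */
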
/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	/**
	 * @list:
	 *
	 * Used to append this struct to the list of entities in the runqueue
	 * @rq under &drm_sched_rq.entities.
	 *
	 * Protected by &drm_sched_rq.lock of @rq.
	 */
	struct list_head		list;

	/**
	 * @rq:
	 *
	 * Runqueue on which this entity is currently scheduled.
	 *
	 * FIXME: Locking is very unclear for this. Writers are protected by
	 * @rq_lock, but readers are generally lockless and seem to just race
	 * with not even a READ_ONCE.
	 */
	struct drm_sched_rq		*rq;

	/**
	 * @sched_list:
	 *
	 * A list of schedulers (struct drm_gpu_scheduler). Jobs from this
	 * entity can be scheduled on any scheduler on this list.
	 *
	 * This can be modified by calling drm_sched_entity_modify_sched().
	 * Locking is entirely up to the driver, see the above function for
	 * more details.
	 *
	 * This will be set to NULL if &num_sched_list equals 1 and @rq has
	 * been set already.
	 *
	 * FIXME: This means priority changes through
	 * drm_sched_entity_set_priority() will be lost henceforth in this case.
	 */
	struct drm_gpu_scheduler	**sched_list;

	/**
	 * @num_sched_list:
	 *
	 * Number of drm_gpu_schedulers in the @sched_list.
	 */
	unsigned int			num_sched_list;

	/**
	 * @priority:
	 *
	 * Priority of the entity. This can be modified by calling
	 * drm_sched_entity_set_priority(). Protected by &rq_lock.
	 */
	enum drm_sched_priority		priority;

	/**
	 * @rq_lock:
	 *
	 * Lock to modify the runqueue to which this entity belongs.
	 */
	spinlock_t			rq_lock;

	/**
	 * @job_queue: the list of jobs of this entity.
	 */
	struct spsc_queue		job_queue;

	/**
	 * @fence_seq:
	 *
	 * A linearly increasing seqno incremented with each new
	 * &drm_sched_fence which is part of the entity.
	 *
	 * FIXME: Callers of drm_sched_job_arm() need to ensure correct
	 * locking, this doesn't need to be atomic.
	 */
	atomic_t			fence_seq;

	/**
	 * @fence_context:
	 *
	 * A unique context for all the fences which belong to this entity. The
	 * &drm_sched_fence.scheduled uses the fence_context but
	 * &drm_sched_fence.finished uses fence_context + 1.
	 */
	uint64_t			fence_context;

	/**
	 * @dependency:
	 *
	 * The dependency fence of the job which is on the top of the job
	 * queue.
	 */
	struct dma_fence		*dependency;

	/**
	 * @cb:
	 *
	 * Callback for the dependency fence above.
	 */
	struct dma_fence_cb		cb;

	/**
	 * @guilty:
	 *
	 * Points to the entity's guilty flag.
	 */
	atomic_t			*guilty;

	/**
	 * @last_scheduled:
	 *
	 * Points to the finished fence of the last scheduled job. Only written
	 * by the scheduler thread, can be accessed locklessly from
	 * drm_sched_job_arm() iff the queue is empty.
	 */
	struct dma_fence		*last_scheduled;

	/**
	 * @last_user: last group leader pushing a job into the entity.
	 */
	struct task_struct		*last_user;

	/**
	 * @stopped:
	 *
	 * Marks the entity as removed from rq and destined for
	 * termination. This is set by calling drm_sched_entity_flush() and by
	 * drm_sched_fini().
	 */
	bool				stopped;

	/**
	 * @entity_idle:
	 *
	 * Signals when entity is not in use, used to sequence entity cleanup
	 * in drm_sched_entity_fini().
	 */
	struct completion		entity_idle;

	/**
	 * @oldest_job_waiting:
	 *
	 * Marks earliest job waiting in SW queue
	 */
	ktime_t				oldest_job_waiting;

	/**
	 * @rb_tree_node:
	 *
	 * The node used to insert this entity into time based priority queue
	 */
	struct rb_node			rb_tree_node;

	/**
	 * @elapsed_ns:
	 *
	 * Records the amount of time where jobs from this entity were active
	 * on the GPU.
	 */
	uint64_t			elapsed_ns;
};

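/*
 * Illustrative sketch (not from the original header): an entity is typically
 * created per userspace context and bound to one or more schedulers. The
 * ring and file_priv names below are hypothetical driver code. With more than
 * one scheduler in the list, only the pointer to the list is stored, so the
 * array must stay valid for the entity's lifetime.
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &ring->sched };
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&file_priv->entity,
 *				    DRM_SCHED_PRIORITY_NORMAL,
 *				    sched_list, ARRAY_SIZE(sched_list),
 *				    NULL);
 *	if (ret)
 *		return ret;
 *
 * The entity is later torn down with drm_sched_entity_destroy(), which
 * flushes remaining jobs before finalizing the entity.
 */
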
/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 * @rb_tree_root: root of time based priority queue of entities for FIFO scheduling
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct drm_gpu_scheduler	*sched;
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
	struct rb_root_cached		rb_tree_root;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence		scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence		*parent;

	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler	*sched;

	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t			lock;

	/**
	 * @owner: job owner for debugging
	 */
	void				*owner;
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

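/*
 * Illustrative sketch (not from the original header): once a job has been
 * armed, &drm_sched_fence.finished can be installed as the job's out fence,
 * for example in a reservation object or a syncobj. The job, obj and syncobj
 * variables are hypothetical; the dma_resv must be locked and a fence slot
 * reserved (dma_resv_reserve_fences()) before adding to it.
 *
 *	struct dma_fence *out_fence = &job->s_fence->finished;
 *
 *	dma_resv_add_fence(obj->resv, out_fence, DMA_RESV_USAGE_WRITE);
 *	drm_syncobj_replace_fence(syncobj, out_fence);
 */
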
/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @list: a job participates in the "pending" and "done" lists.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of job.
 * @finish_cb: the callback for the finished fence.
 * @work: Helper to reschedule job kill to different context.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and
 * should call drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job.
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct list_head		list;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;

	/*
	 * work is used only after finish_cb has been used and will not be
	 * accessed anymore.
	 */
	union {
		struct dma_fence_cb	finish_cb;
		struct work_struct	work;
	};

	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity		*entity;
	struct dma_fence_cb		cb;

	/**
	 * @dependencies:
	 *
	 * Contains the dependencies as struct dma_fence for this job, see
	 * drm_sched_job_add_dependency() and
	 * drm_sched_job_add_implicit_dependencies().
	 */
	struct xarray			dependencies;

	/** @last_dependency: tracks @dependencies as they signal */
	unsigned long			last_dependency;

	/**
	 * @submit_ts:
	 *
	 * When the job was pushed into the entity queue.
	 */
	ktime_t				submit_ts;
};

static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return s_job && atomic_inc_return(&s_job->karma) > threshold;
}

enum drm_gpu_sched_stat {
	DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
	DRM_GPU_SCHED_STAT_NOMINAL,
	DRM_GPU_SCHED_STAT_ENODEV,
};

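/*
 * Illustrative sketch (not from the original header): the expected submission
 * flow. Drivers usually embed struct drm_sched_job in their own job structure;
 * my_job, entity, obj, owner and the error handling below are hypothetical.
 *
 *	struct my_job {
 *		struct drm_sched_job base;
 *		// driver-specific payload (command buffers, etc.)
 *	};
 *
 *	ret = drm_sched_job_init(&job->base, entity, owner);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_sched_job_add_implicit_dependencies(&job->base, obj, true);
 *	if (ret)
 *		goto err_cleanup;	// drm_sched_job_cleanup(&job->base)
 *
 *	drm_sched_job_arm(&job->base);
 *	drm_sched_entity_push_job(&job->base);
 */
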
/**
 * struct drm_sched_backend_ops - Define the backend operations
 *	called by the scheduler
 *
 * These functions should be implemented in the driver side.
 */
struct drm_sched_backend_ops {
	/**
	 * @prepare_job:
	 *
	 * Called when the scheduler is considering scheduling this job next,
	 * to get another struct dma_fence for this job to block on. Once it
	 * returns NULL, run_job() may be called.
	 *
	 * Can be NULL if no additional preparation of the dependencies is
	 * necessary. Skipped when jobs are killed instead of run.
	 */
	struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job,
					 struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved. This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 *
	 * This method is called in a workqueue context.
	 *
	 * Drivers typically issue a reset to recover from GPU hangs, and this
	 * procedure usually follows the following workflow:
	 *
	 * 1. Stop the scheduler using drm_sched_stop(). This will park the
	 *    scheduler thread and cancel the timeout work, guaranteeing that
	 *    nothing is queued while we reset the hardware queue.
	 * 2. Try to gracefully stop non-faulty jobs (optional).
	 * 3. Issue a GPU reset (driver-specific).
	 * 4. Re-submit jobs using drm_sched_resubmit_jobs().
	 * 5. Restart the scheduler using drm_sched_start(). At that point, new
	 *    jobs can be queued, and the scheduler thread is unblocked.
	 *
	 * Note that some GPUs have distinct hardware queues but need to reset
	 * the GPU globally, which requires extra synchronization between the
	 * timeout handlers of the different &drm_gpu_scheduler. One way to
	 * achieve this synchronization is to create an ordered workqueue
	 * (using alloc_ordered_workqueue()) at the driver level, and pass this
	 * queue to drm_sched_init(), to guarantee that timeout handlers are
	 * executed sequentially. The above workflow needs to be slightly
	 * adjusted in that case:
	 *
	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop().
	 * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
	 *    the reset (optional).
	 * 3. Issue a GPU reset on all faulty queues (driver-specific).
	 * 4. Re-submit jobs on all schedulers impacted by the reset using
	 *    drm_sched_resubmit_jobs().
	 * 5. Restart all schedulers that were stopped in step #1 using
	 *    drm_sched_start().
	 *
	 * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
	 * and the underlying driver has started or completed recovery.
	 *
	 * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
	 * available, i.e. has been unplugged.
	 */
	enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};

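/*
 * Illustrative sketch (not from the original header): a minimal backend with
 * a timeout handler following the single-queue recovery workflow documented
 * above. All my_* names (my_job, my_hw_submit, my_hw_reset, my_sched_ops) are
 * hypothetical driver code.
 *
 *	static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
 *	{
 *		struct my_job *job = container_of(sched_job, struct my_job, base);
 *
 *		// Kick the hardware and return the hardware fence; the
 *		// scheduler signals sched_job->s_fence->finished from it.
 *		return my_hw_submit(job);
 *	}
 *
 *	static enum drm_gpu_sched_stat
 *	my_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		struct drm_gpu_scheduler *sched = sched_job->sched;
 *
 *		drm_sched_stop(sched, sched_job);
 *		my_hw_reset(sched);		// driver-specific reset
 *		drm_sched_resubmit_jobs(sched);
 *		drm_sched_start(sched, true);
 *
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 *
 *	static void my_free_job(struct drm_sched_job *sched_job)
 *	{
 *		struct my_job *job = container_of(sched_job, struct my_job, base);
 *
 *		drm_sched_job_cleanup(sched_job);
 *		kfree(job);
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job = my_run_job,
 *		.timedout_job = my_timedout_job,
 *		.free_job = my_free_job,
 *	};
 */
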
/**
 * struct drm_gpu_scheduler - scheduler instance-specific data
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @timeout_wq: workqueue used to queue @work_tdr
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @pending_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the hangs by a job crosses this limit then it is marked
 *              guilty and it will no longer be considered for scheduling.
 * @score: score to help loadbalancer pick an idle sched
 * @_score: score used when the driver doesn't provide one
 * @ready: marks if the underlying HW is ready to work
 * @free_guilty: A hint to the timeout handler to free the guilty job.
 * @dev: system &struct device
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_COUNT];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct workqueue_struct		*timeout_wq;
	struct delayed_work		work_tdr;
	struct task_struct		*thread;
	struct list_head		pending_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t			*score;
	atomic_t			_score;
	bool				ready;
	bool				free_guilty;
	struct device			*dev;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev);

void drm_sched_fini(struct drm_gpu_scheduler *sched);

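/*
 * Illustrative sketch (not from the original header): one scheduler instance
 * is typically initialized per hardware ring at driver load. The ring, the
 * my_sched_ops table, the queue depth and the timeout below are hypothetical.
 *
 *	ret = drm_sched_init(&ring->sched, &my_sched_ops,
 *			     64,			// hw_submission
 *			     0,				// hang_limit
 *			     msecs_to_jiffies(500),	// timeout
 *			     NULL,			// timeout_wq (NULL: system_wq)
 *			     NULL,			// score (NULL: internal _score)
 *			     ring->name, dev);
 *	if (ret)
 *		return ret;
 *
 * The matching drm_sched_fini() is called at teardown, typically after the
 * entities using this scheduler have been destroyed.
 */
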
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence);
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
					 struct drm_file *file,
					 u32 handle,
					 u32 point);
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage);
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write);

void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list);

void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_alloc(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_init(struct drm_sched_fence *fence,
			  struct drm_sched_entity *entity);
void drm_sched_fence_free(struct drm_sched_fence *fence);

void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list);

#endif