/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting jobs to a HW ring.
 *
 * @entity: scheduler entity to init
 * @rq_list: the list of run queues on which jobs from this
 *           entity can be submitted
 * @num_rq_list: number of run queues in rq_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: the rq_list should have at least one element to schedule
 *       the entity
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  struct drm_sched_rq **rq_list,
			  unsigned int num_rq_list,
			  atomic_t *guilty)
{
	int i;

	if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq_list[0];
	entity->guilty = guilty;
	entity->num_rq_list = num_rq_list;
	entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
				  GFP_KERNEL);
	if (!entity->rq_list)
		return -ENOMEM;

	for (i = 0; i < num_rq_list; ++i)
		entity->rq_list[i] = rq_list[i];
	entity->last_scheduled = NULL;

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);

/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_peek(&entity->job_queue) == NULL)
		return true;

	return false;
}

/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity could provide a job.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}
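
/*
 * Illustrative usage sketch (assumption, not part of this file): a driver
 * typically initializes an entity against one run queue per scheduler it
 * wants to submit to. "ring->sched" and the chosen priority level are
 * made-up names for the example.
 *
 *	struct drm_sched_rq *rq_list[] = {
 *		&ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
 *	};
 *	struct drm_sched_entity entity;
 *	int r;
 *
 *	r = drm_sched_entity_init(&entity, rq_list, ARRAY_SIZE(rq_list), NULL);
 *	if (r)
 *		return r;
 */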

/**
 * drm_sched_entity_get_free_sched - Get the rq from rq_list with the least load
 *
 * @entity: scheduler entity
 *
 * Returns a pointer to the rq with the least load.
 */
static struct drm_sched_rq *
drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = NULL;
	unsigned int min_jobs = UINT_MAX, num_jobs;
	int i;

	for (i = 0; i < entity->num_rq_list; ++i) {
		struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;

		if (!entity->rq_list[i]->sched->ready) {
			DRM_WARN("sched %s is not ready, skipping", sched->name);
			continue;
		}

		num_jobs = atomic_read(&sched->num_jobs);
		if (num_jobs < min_jobs) {
			min_jobs = num_jobs;
			rq = entity->rq_list[i];
		}
	}

	return rq;
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions; this first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini, consume existing
	 * queued IBs or discard them on SIGKILL
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable any further IB enqueue right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}
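
/*
 * Illustrative usage sketch (assumption, not part of this file): a driver's
 * file-close path typically gives each entity a bounded amount of time to
 * drain before final cleanup. The entity pointer and the 100 ms budget are
 * made up for the example.
 *
 *	long timeout = msecs_to_jiffies(100);
 *
 *	drm_sched_entity_flush(entity, timeout);
 *	// ...once no more submissions can arrive:
 *	drm_sched_entity_fini(entity);
 */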

/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipeline is hung by an older entity, the new entity
		 * might not even have had a chance to submit its first job to
		 * HW, so entity->last_scheduled will remain NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It goes over the
 * entity and signals all jobs with an error code if the process was killed.
 *
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched;

	sched = entity->rq->sched;
	drm_sched_rq_remove_entity(entity->rq, entity);

	/* Consumption of existing IBs wasn't completed. Forcefully
	 * remove them here.
	 */
	if (spsc_queue_peek(&entity->job_queue)) {
		/* Park the scheduler thread for a moment to make sure it isn't
		 * processing our entity.
		 */
		kthread_park(sched->thread);
		kthread_unpark(sched->thread);
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
	kfree(entity->rq_list);
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini()
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);

/**
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/**
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority
 */
static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
					     enum drm_sched_priority priority)
{
	*rq = &(*rq)->sched->sched_rq[priority];
}

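/*
 * Illustrative usage sketch (assumption, not part of this file): drivers that
 * do not need the split flush/fini teardown above simply pair
 * drm_sched_entity_init() with drm_sched_entity_destroy(), which flushes with
 * MAX_WAIT_SCHED_ENTITY_Q_EMPTY and then finalizes the entity.
 *
 *	drm_sched_entity_destroy(&ctx->entity);	// "ctx" is a made-up context
 */
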
/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	unsigned int i;

	spin_lock(&entity->rq_lock);

	for (i = 0; i < entity->num_rq_list; ++i)
		drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);

	drm_sched_rq_remove_entity(entity->rq, entity);
	drm_sched_entity_set_rq_priority(&entity->rq, priority);
	drm_sched_rq_add_entity(entity->rq, entity);

	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);

/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourselves
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}
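
/*
 * Illustrative usage sketch (assumption, not part of this file): a driver can
 * rebalance an already initialized entity to another priority level at
 * runtime with drm_sched_entity_set_priority(); the context pointer and the
 * chosen priority value are made up for the example.
 *
 *	drm_sched_entity_set_priority(&ctx->entity, DRM_SCHED_PRIORITY_HIGH_HW);
 */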

/**
 * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {

		if (drm_sched_entity_add_dependency_cb(entity)) {

			trace_drm_sched_job_wait_dep(sched_job,
						     entity->dependency);
			return NULL;
		}
	}

	/* skip jobs from an entity that has been marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}

/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || entity->num_rq_list <= 1)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	rq = drm_sched_entity_get_free_sched(entity);
	if (rq == entity->rq)
		return;

	spin_lock(&entity->rq_lock);
	drm_sched_rq_remove_entity(entity->rq, entity);
	entity->rq = rq;
	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion into the queue matches the
 * order of the jobs' fence sequence numbers, this function should be called
 * under the same lock as drm_sched_job_init().
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(&entity->rq->sched->num_jobs);
	WRITE_ONCE(entity->last_user, current->group_leader);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
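
/*
 * Illustrative usage sketch (assumption, not part of this file): the submit
 * side of a driver initializes the job and pushes it to the entity under one
 * common lock, as required by the ordering note on
 * drm_sched_entity_push_job(). The job/entity/lock names are made up.
 *
 *	mutex_lock(&ctx->submit_lock);
 *	r = drm_sched_job_init(&job->base, &ctx->entity, ctx);
 *	if (r) {
 *		mutex_unlock(&ctx->submit_lock);
 *		return r;
 *	}
 *	// ...attach dependencies/fences to the driver job here...
 *	drm_sched_entity_push_job(&job->base, &ctx->entity);
 *	mutex_unlock(&ctx->submit_lock);
 */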