/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: the sched_list must have at least one element for the entity to be
 *       scheduled.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	entity->last_scheduled = NULL;

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);

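/*
 * Example usage (an illustrative sketch, not a definitive recipe): a driver
 * would typically embed a struct drm_sched_entity in its per-context object
 * and initialize it against the scheduler(s) of the rings that context may
 * submit to. The "ring" and "ctx" structures here are hypothetical driver
 * types; passing more than one scheduler enables load balancing via
 * drm_sched_entity_select_rq().
 *
 *	struct drm_gpu_scheduler *scheds[] = { &ring->sched };
 *	int r;
 *
 *	r = drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *				  scheds, ARRAY_SIZE(scheds), NULL);
 *	if (r)
 *		return r;
 */
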
/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0)
		return true;

	return false;
}

/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity could provide a job.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_get_free_sched - Get the rq from sched_list with least load
 *
 * @entity: scheduler entity
 *
 * Returns a pointer to the rq of the least loaded scheduler in the entity's
 * sched_list.
 */
static struct drm_sched_rq *
drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = NULL;
	unsigned int min_score = UINT_MAX, num_score;
	unsigned int i;

	for (i = 0; i < entity->num_sched_list; ++i) {
		struct drm_gpu_scheduler *sched = entity->sched_list[i];

		if (!sched->ready) {
			DRM_WARN("sched %s is not ready, skipping\n", sched->name);
			continue;
		}

		num_score = atomic_read(&sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			rq = &sched->sched_rq[entity->priority];
		}
	}

	return rq;
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions; this first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout.
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable enqueueing of any more IBs right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipe is hung by an older entity, the new entity
		 * might not even have had a chance to submit its first job
		 * to the HW, so entity->last_scheduled can remain NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It goes over the
 * entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	/* Consumption of existing IBs wasn't completed. Forcefully
	 * remove them here.
	 */
	if (spsc_queue_count(&entity->job_queue)) {
		if (sched) {
			/*
			 * Wait for the thread to be idle to make sure it
			 * isn't processing this entity.
			 */
			wait_for_completion(&entity->entity_idle);
		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini().
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);

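/*
 * Example teardown (an illustrative sketch): on context destruction a driver
 * can simply call drm_sched_entity_destroy(), or perform the flush and fini
 * steps separately when it needs work of its own in between. "ctx" is a
 * hypothetical driver type.
 *
 *	drm_sched_entity_destroy(&ctx->entity);
 *
 * or, equivalently:
 *
 *	drm_sched_entity_flush(&ctx->entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
 *	drm_sched_entity_fini(&ctx->entity);
 */
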
/**
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 *
 * @f: signaled fence
 * @cb: our callback structure
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/**
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 *
 * @f: signaled fence
 * @cb: our callback structure
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);

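/*
 * Example (an illustrative sketch): a driver exposing a per-context priority
 * ioctl could apply the new level like this; "ctx" and the mapping from a
 * uapi value to enum drm_sched_priority are hypothetical.
 *
 *	drm_sched_entity_set_priority(&ctx->entity, DRM_SCHED_PRIORITY_HIGH_HW);
 *
 * The new priority only influences runqueue selection for subsequent
 * submissions; jobs already queued keep the runqueue they were pushed to.
 */
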
/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourselves.
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled.
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

/**
 * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from an entity that is marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}

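/*
 * Example (an illustrative sketch): drm_sched_entity_pop_job() keeps asking
 * the driver's ->dependency callback for fences until none remain. A minimal
 * driver-side callback, assuming a hypothetical per-job fence array (the
 * "deps"/"num_deps"/"cur_dep" fields are not part of struct drm_sched_job),
 * might look like:
 *
 *	static struct dma_fence *
 *	my_job_dependency(struct drm_sched_job *sched_job,
 *			  struct drm_sched_entity *s_entity)
 *	{
 *		struct my_job *job = container_of(sched_job, struct my_job,
 *						  base);
 *
 *		if (job->cur_dep < job->num_deps)
 *			return job->deps[job->cur_dep++];
 *
 *		return NULL;
 *	}
 *
 * The reference on the returned fence is consumed by the scheduler.
 */
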
/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	rq = drm_sched_entity_get_free_sched(entity);
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion to the queue matches the
 * job's fence sequence number, this function should be called with
 * drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(&entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
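
/*
 * Example submission path (an illustrative sketch): a driver initializes a
 * job against the entity and then pushes it. The "my_job" type wrapping a
 * struct drm_sched_job as "base" and the "ctx" object are hypothetical; per
 * the note above, drm_sched_job_init() and drm_sched_entity_push_job()
 * should run under the same lock so fence sequence numbers match the queue
 * order.
 *
 *	int r;
 *
 *	r = drm_sched_job_init(&job->base, &ctx->entity, ctx);
 *	if (r)
 *		return r;
 *
 *	drm_sched_entity_push_job(&job->base, &ctx->entity);
 */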