// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Etnaviv Project
 */

#include <linux/kthread.h>

#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"

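/*
 * job_hang_limit: how many times a job may trigger a timeout before the
 * scheduler considers it guilty and refuses to resubmit it.
 * hw_job_limit: maximum number of jobs queued on the hardware ring at
 * any one time.
 */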
static int etnaviv_job_hang_limit;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);

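/*
 * drm_sched dependency callback: return the next fence this job still has
 * to wait on, or NULL once all dependencies are met. The scheduler keeps
 * calling back until NULL is returned; the reference to each returned
 * fence is handed over to (and dropped by) the scheduler.
 */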
static struct dma_fence *
etnaviv_sched_dependency(struct drm_sched_job *sched_job,
			 struct drm_sched_entity *entity)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence;
	int i;

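	/* Check the explicit fence passed in by userspace first. */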
	if (unlikely(submit->in_fence)) {
		fence = submit->in_fence;
		submit->in_fence = NULL;

		if (!dma_fence_is_signaled(fence))
			return fence;

		dma_fence_put(fence);
	}

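	/*
	 * Then walk the implicit fences collected from each BO's reservation
	 * object at submit time: at most one exclusive (write) fence and any
	 * number of shared (read) fences per BO.
	 */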
	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
		int j;

		if (bo->excl) {
			fence = bo->excl;
			bo->excl = NULL;

			if (!dma_fence_is_signaled(fence))
				return fence;

			dma_fence_put(fence);
		}

		for (j = 0; j < bo->nr_shared; j++) {
			if (!bo->shared[j])
				continue;

			fence = bo->shared[j];
			bo->shared[j] = NULL;

			if (!dma_fence_is_signaled(fence))
				return fence;

			dma_fence_put(fence);
		}
		kfree(bo->shared);
		bo->nr_shared = 0;
		bo->shared = NULL;
	}

	return NULL;
}

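/*
 * drm_sched run_job callback: called once all dependencies of a job have
 * signaled. Returns the hardware fence signaling completion of the job.
 * A job whose finished fence already carries an error was marked guilty
 * during a previous recovery and is skipped instead of resubmitted.
 */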
static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence = NULL;

	if (likely(!sched_job->s_fence->finished.error))
		fence = etnaviv_gpu_submit(submit);
	else
		dev_dbg(submit->gpu->dev, "skipping bad job\n");

	return fence;
}

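/*
 * Timeout handler: invoked by the scheduler when a job exceeds its
 * timeout. Check for a spurious timeout first, then whether the GPU
 * front-end is still making progress, and only then go through a full
 * GPU reset and job recovery.
 */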
static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;
	u32 dma_addr;
	int change;

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (fence_completed(gpu, submit->out_fence->seqno))
		return;

	/*
	 * If the GPU is still making forward progress on the front-end (which
	 * should never loop) we push out the timeout to give it a chance to
	 * finish the job.
	 */
	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	change = dma_addr - gpu->hangcheck_dma_addr;
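	/*
	 * Forward movement of the FE DMA address means new command words
	 * have been fetched; a negative delta is presumably a branch back
	 * in the ring buffer and counts as progress as well.
	 */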
	if (change < 0 || change > 16) {
		gpu->hangcheck_dma_addr = dma_addr;
		schedule_delayed_work(&sched_job->work_tdr,
				      sched_job->sched->timeout);
		return;
	}

	/* block scheduler */
	kthread_park(gpu->sched.thread);
	drm_sched_hw_job_reset(&gpu->sched, sched_job);

	/* get the GPU back into the init state */
	etnaviv_core_dump(gpu);
	etnaviv_gpu_recover_hang(gpu);

	/* restart scheduler after GPU is usable again */
	drm_sched_job_recovery(&gpu->sched);
	kthread_unpark(gpu->sched.thread);
}

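/*
 * free_job callback: the scheduler is done with this job, drop the
 * reference taken in etnaviv_sched_push_job().
 */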
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

	etnaviv_submit_put(submit);
}

static const struct drm_sched_backend_ops etnaviv_sched_ops = {
	.dependency = etnaviv_sched_dependency,
	.run_job = etnaviv_sched_run_job,
	.timedout_job = etnaviv_sched_timedout_job,
	.free_job = etnaviv_sched_free_job,
};

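/*
 * Hand a fully set up submit over to the scheduler. On success the
 * scheduler owns a reference to the submit and submit->out_fence_id
 * identifies the out-fence towards userspace.
 */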
int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
			   struct etnaviv_gem_submit *submit)
{
	int ret = 0;

	/*
	 * Hold the fence lock across the whole operation to avoid jobs being
	 * pushed out of order with regard to their sched fence seqnos as
	 * allocated in drm_sched_job_init.
	 */
	mutex_lock(&submit->gpu->fence_lock);

	ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
				 sched_entity, submit->cmdbuf.ctx);
	if (ret)
		goto out_unlock;

	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
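	/*
	 * Allocate a fence id that userspace can wait on without holding a
	 * fence reference; cyclic allocation makes sure stale ids aren't
	 * immediately reused for new fences.
	 */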
	submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
						submit->out_fence, 0,
						INT_MAX, GFP_KERNEL);
	if (submit->out_fence_id < 0) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* the scheduler holds on to the job now */
	kref_get(&submit->refcount);

	drm_sched_entity_push_job(&submit->sched_job, sched_entity);

out_unlock:
	mutex_unlock(&submit->gpu->fence_lock);

	return ret;
}

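/*
 * Set up one scheduler instance per GPU core. The 500ms timeout is what
 * eventually triggers etnaviv_sched_timedout_job() on a stuck job.
 */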
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
			     msecs_to_jiffies(500), dev_name(gpu->dev));
	if (ret)
		return ret;

	return 0;
}

void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
	drm_sched_fini(&gpu->sched);
}