// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Etnaviv Project
 */

#include <linux/moduleparam.h>

#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"

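/*
 * Module parameters (read-only via sysfs):
 * job_hang_limit - how often a job may trigger a timeout before its context
 *                  is marked guilty (0 = first timeout is fatal).
 * hw_job_limit   - how many jobs may be queued on the hardware at once.
 */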
static int etnaviv_job_hang_limit = 0;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);

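/*
 * Scheduler run_job callback: invoked once all dependencies of a job have
 * signaled. Hands the job to the hardware and returns the hardware fence,
 * or NULL when the job was marked bad (its finished fence carries an error,
 * e.g. after GPU recovery declared the context guilty) and is skipped.
 */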
static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence = NULL;

	if (likely(!sched_job->s_fence->finished.error))
		fence = etnaviv_gpu_submit(submit);
	else
		dev_dbg(submit->gpu->dev, "skipping bad job\n");

	return fence;
}

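/*
 * Scheduler timedout_job callback: invoked when a job exceeded its timeout.
 * The timeout may be spurious, so before declaring the GPU hung we check
 * whether the job has completed in the meantime and whether the front-end
 * is still making forward progress. Only if neither holds is the job marked
 * guilty, a core dump captured and the GPU recovered.
 */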
static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
							  *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;
	u32 dma_addr;
	int change;

	/* block scheduler */
	drm_sched_stop(&gpu->sched, sched_job);

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(submit->out_fence))
		goto out_no_timeout;

	/*
	 * If the GPU is still making forward progress on the front-end (which
	 * should never loop) we shift out the timeout to give it a chance to
	 * finish the job.
	 */
	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	change = dma_addr - gpu->hangcheck_dma_addr;
	if (gpu->state == ETNA_GPU_STATE_RUNNING &&
	    (gpu->completed_fence != gpu->hangcheck_fence ||
	     change < 0 || change > 16)) {
		gpu->hangcheck_dma_addr = dma_addr;
		gpu->hangcheck_fence = gpu->completed_fence;
		goto out_no_timeout;
	}

	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* get the GPU back into the init state */
	etnaviv_core_dump(submit);
	etnaviv_gpu_recover_hang(submit);

	drm_sched_resubmit_jobs(&gpu->sched);

	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;

out_no_timeout:
	/* restart scheduler after GPU is usable again */
	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

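/*
 * Scheduler free_job callback: the scheduler is done with the job, so drop
 * its reference on the submit.
 */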
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

	drm_sched_job_cleanup(sched_job);

	etnaviv_submit_put(submit);
}

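/* Hooks called by the DRM GPU scheduler core. */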
static const struct drm_sched_backend_ops etnaviv_sched_ops = {
	.run_job = etnaviv_sched_run_job,
	.timedout_job = etnaviv_sched_timedout_job,
	.free_job = etnaviv_sched_free_job,
};

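/*
 * Queue a validated submit on the scheduler entity. On success the scheduler
 * holds a reference to the submit and the out-fence has been published in
 * the user_fences xarray for later lookup by userspace.
 */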
int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	int ret;

	/*
	 * Hold the sched lock across the whole operation to avoid jobs being
	 * pushed out of order with regard to their sched fence seqnos as
	 * allocated in drm_sched_job_arm.
	 */
	mutex_lock(&gpu->sched_lock);

	drm_sched_job_arm(&submit->sched_job);

	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
	ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
			      submit->out_fence, xa_limit_32b,
			      &gpu->next_user_fence, GFP_KERNEL);
	if (ret < 0) {
		drm_sched_job_cleanup(&submit->sched_job);
		goto out_unlock;
	}

	/*
	 * xa_alloc_cyclic() returns 1 when the ID allocation wraps around,
	 * which is not an error; don't leak it to the caller.
	 */
	ret = 0;

	/* the scheduler holds on to the job now */
	kref_get(&submit->refcount);

	drm_sched_entity_push_job(&submit->sched_job);

out_unlock:
	mutex_unlock(&gpu->sched_lock);

	return ret;
}

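/*
 * Set up the scheduler instance for this GPU core, with a job timeout of
 * 500ms.
 */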
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
			     msecs_to_jiffies(500), NULL, NULL,
			     dev_name(gpu->dev), gpu->dev);
	if (ret)
		return ret;

	return 0;
}

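/* Counterpart to etnaviv_sched_init(): tear the scheduler down again. */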
void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
	drm_sched_fini(&gpu->sched);
}