1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
27198e6b0SRob Clark /*
37198e6b0SRob Clark * Copyright (C) 2013 Red Hat
47198e6b0SRob Clark * Author: Rob Clark <robdclark@gmail.com>
57198e6b0SRob Clark */
67198e6b0SRob Clark
77198e6b0SRob Clark #include "msm_ringbuffer.h"
87198e6b0SRob Clark #include "msm_gpu.h"
97198e6b0SRob Clark
101d8a5ca4SRob Clark static uint num_hw_submissions = 8;
111d8a5ca4SRob Clark MODULE_PARM_DESC(num_hw_submissions, "The max # of jobs to write into ringbuffer (default 8)");
121d8a5ca4SRob Clark module_param(num_hw_submissions, uint, 0600);
131d8a5ca4SRob Clark
/*
 * drm_sched run_job callback: hand a queued submit to the GPU.
 *
 * Initializes the submit's hw_fence against the ring's fence context,
 * drops the active-pin on each BO in the submit (under priv->lru.lock),
 * then writes the submit into the ringbuffer under gpu->lock.
 *
 * Returns a new reference to the submit's hw fence, which the scheduler
 * uses to track job completion.
 */
static struct dma_fence *msm_job_run(struct drm_sched_job *job)
{
	struct msm_gem_submit *submit = to_msm_submit(job);
	struct msm_fence_context *fctx = submit->ring->fctx;
	struct msm_gpu *gpu = submit->gpu;
	struct msm_drm_private *priv = gpu->dev->dev_private;
	int i;

	/* Attach the hw fence to this ring's fence context now that the
	 * job is actually being scheduled onto the hw.
	 */
	msm_fence_init(submit->hw_fence, fctx);

	/* lru.lock protects the per-BO pin state updated below */
	mutex_lock(&priv->lru.lock);

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;

		msm_gem_unpin_active(obj);
		submit->bos[i].flags &= ~BO_PINNED;
	}

	mutex_unlock(&priv->lru.lock);

	/* TODO move submit path over to using a per-ring lock.. */
	mutex_lock(&gpu->lock);

	msm_gpu_submit(gpu, submit);

	mutex_unlock(&gpu->lock);

	/* Scheduler owns the returned fence reference */
	return dma_fence_get(submit->hw_fence);
}
441d8a5ca4SRob Clark
/*
 * drm_sched free_job callback: release everything the scheduler held
 * for a finished (or aborted) job.
 */
static void msm_job_free(struct drm_sched_job *job)
{
	/* Detach the job from the scheduler's bookkeeping... */
	drm_sched_job_cleanup(job);

	/* ...then drop the submit reference the scheduler was holding. */
	msm_gem_submit_put(to_msm_submit(job));
}
521d8a5ca4SRob Clark
53500ca2a1STom Rix static const struct drm_sched_backend_ops msm_sched_ops = {
541d8a5ca4SRob Clark .run_job = msm_job_run,
551d8a5ca4SRob Clark .free_job = msm_job_free
561d8a5ca4SRob Clark };
571d8a5ca4SRob Clark
/*
 * msm_ringbuffer_new() - allocate and initialize one GPU ringbuffer
 * @gpu: the GPU this ring belongs to
 * @id: ring index (used for naming the BO and fence context)
 * @memptrs: CPU pointer to this ring's shared memptrs (fence seqno, etc.)
 * @memptrs_iova: GPU address of @memptrs
 *
 * Allocates the ring struct, a GPU-readonly write-combined BO for the
 * ring contents, a drm scheduler instance, and a fence context.
 *
 * Return: the new ring, or an ERR_PTR on failure.  On failure all
 * partially-initialized state is torn down via msm_ringbuffer_destroy().
 */
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
		void *memptrs, uint64_t memptrs_iova)
{
	struct msm_ringbuffer *ring;
	long sched_timeout;
	char name[32];
	int ret;

	/* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
	BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ));

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		ret = -ENOMEM;
		goto fail;
	}

	ring->gpu = gpu;
	ring->id = id;

	ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
		check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY),
		gpu->aspace, &ring->bo, &ring->iova);

	if (IS_ERR(ring->start)) {
		ret = PTR_ERR(ring->start);
		/* clear so msm_ringbuffer_destroy() doesn't see an ERR_PTR */
		ring->start = NULL;
		goto fail;
	}

	msm_gem_object_set_name(ring->bo, "ring%d", id);

	/* ring->start is a u32 pointer, so the byte size is shifted down
	 * by 2 to get the end in dwords:
	 */
	ring->end   = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
	ring->next  = ring->start;
	ring->cur   = ring->start;

	ring->memptrs = memptrs;
	ring->memptrs_iova = memptrs_iova;

	/* currently managing hangcheck ourselves: */
	sched_timeout = MAX_SCHEDULE_TIMEOUT;

	ret = drm_sched_init(&ring->sched, &msm_sched_ops,
			num_hw_submissions, 0, sched_timeout,
			NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
	if (ret) {
		goto fail;
	}

	INIT_LIST_HEAD(&ring->submits);
	spin_lock_init(&ring->submit_lock);
	spin_lock_init(&ring->preempt_lock);

	snprintf(name, sizeof(name), "gpu-ring-%d", ring->id);

	ring->fctx = msm_fence_context_alloc(gpu->dev, &ring->memptrs->fence, name);
	if (IS_ERR(ring->fctx)) {
		/* Previously unchecked: a failed fence-context allocation
		 * would leave an ERR_PTR in ring->fctx for later derefs.
		 * NULL it so msm_fence_context_free() in the destroy path
		 * is a no-op (kfree(NULL)).
		 */
		ret = PTR_ERR(ring->fctx);
		ring->fctx = NULL;
		goto fail;
	}

	return ring;

fail:
	msm_ringbuffer_destroy(ring);
	return ERR_PTR(ret);
}
1217198e6b0SRob Clark
/*
 * msm_ringbuffer_destroy() - tear down a ring created by msm_ringbuffer_new()
 *
 * Accepts NULL or an ERR_PTR and treats them as a no-op, so it is safe
 * to call from msm_ringbuffer_new()'s error path with a partially
 * constructed ring.
 *
 * NOTE(review): teardown order appears deliberate - the scheduler is
 * finalized before the fence context and ring BO are released,
 * presumably so no in-flight job callback touches freed state; confirm
 * before reordering.
 */
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
{
	if (IS_ERR_OR_NULL(ring))
		return;

	/* Shut down the scheduler first. */
	drm_sched_fini(&ring->sched);

	msm_fence_context_free(ring->fctx);

	/* Unmaps and drops the ringbuffer BO allocated in msm_ringbuffer_new() */
	msm_gem_kernel_put(ring->bo, ring->gpu->aspace);

	kfree(ring);
}
135