xref: /openbmc/linux/drivers/gpu/drm/msm/msm_fence.c (revision 1cd0787f)
1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2fde5de6cSRob Clark /*
3fde5de6cSRob Clark  * Copyright (C) 2013-2016 Red Hat
4fde5de6cSRob Clark  * Author: Rob Clark <robdclark@gmail.com>
5fde5de6cSRob Clark  */
6fde5de6cSRob Clark 
7f54d1867SChris Wilson #include <linux/dma-fence.h>
8ca762a8aSRob Clark 
9fde5de6cSRob Clark #include "msm_drv.h"
10fde5de6cSRob Clark #include "msm_fence.h"
11f8b8487cSRob Clark #include "msm_gpu.h"
12f8b8487cSRob Clark 
fctx2gpu(struct msm_fence_context * fctx)13f8b8487cSRob Clark static struct msm_gpu *fctx2gpu(struct msm_fence_context *fctx)
14f8b8487cSRob Clark {
15f8b8487cSRob Clark 	struct msm_drm_private *priv = fctx->dev->dev_private;
16f8b8487cSRob Clark 	return priv->gpu;
17f8b8487cSRob Clark }
18f8b8487cSRob Clark 
deadline_timer(struct hrtimer * t)19f8b8487cSRob Clark static enum hrtimer_restart deadline_timer(struct hrtimer *t)
20f8b8487cSRob Clark {
21f8b8487cSRob Clark 	struct msm_fence_context *fctx = container_of(t,
22f8b8487cSRob Clark 			struct msm_fence_context, deadline_timer);
23f8b8487cSRob Clark 
24f8b8487cSRob Clark 	kthread_queue_work(fctx2gpu(fctx)->worker, &fctx->deadline_work);
25f8b8487cSRob Clark 
26f8b8487cSRob Clark 	return HRTIMER_NORESTART;
27f8b8487cSRob Clark }
28f8b8487cSRob Clark 
deadline_work(struct kthread_work * work)29f8b8487cSRob Clark static void deadline_work(struct kthread_work *work)
30f8b8487cSRob Clark {
31f8b8487cSRob Clark 	struct msm_fence_context *fctx = container_of(work,
32f8b8487cSRob Clark 			struct msm_fence_context, deadline_work);
33f8b8487cSRob Clark 
34f8b8487cSRob Clark 	/* If deadline fence has already passed, nothing to do: */
35f8b8487cSRob Clark 	if (msm_fence_completed(fctx, fctx->next_deadline_fence))
36f8b8487cSRob Clark 		return;
37f8b8487cSRob Clark 
38f8b8487cSRob Clark 	msm_devfreq_boost(fctx2gpu(fctx), 2);
39f8b8487cSRob Clark }
40fde5de6cSRob Clark 
41ca762a8aSRob Clark 
42ca762a8aSRob Clark struct msm_fence_context *
msm_fence_context_alloc(struct drm_device * dev,volatile uint32_t * fenceptr,const char * name)43da3d378dSRob Clark msm_fence_context_alloc(struct drm_device *dev, volatile uint32_t *fenceptr,
44da3d378dSRob Clark 		const char *name)
45fde5de6cSRob Clark {
46ca762a8aSRob Clark 	struct msm_fence_context *fctx;
4795d1deb0SRob Clark 	static int index = 0;
48ca762a8aSRob Clark 
49ca762a8aSRob Clark 	fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
50ca762a8aSRob Clark 	if (!fctx)
51ca762a8aSRob Clark 		return ERR_PTR(-ENOMEM);
52ca762a8aSRob Clark 
53ca762a8aSRob Clark 	fctx->dev = dev;
54d7fd8634SDmitry Baryshkov 	strscpy(fctx->name, name, sizeof(fctx->name));
55f54d1867SChris Wilson 	fctx->context = dma_fence_context_alloc(1);
5695d1deb0SRob Clark 	fctx->index = index++;
57da3d378dSRob Clark 	fctx->fenceptr = fenceptr;
58b6295f9aSRob Clark 	spin_lock_init(&fctx->spinlock);
59ca762a8aSRob Clark 
602311720aSRob Clark 	/*
612311720aSRob Clark 	 * Start out close to the 32b fence rollover point, so we can
622311720aSRob Clark 	 * catch bugs with fence comparisons.
632311720aSRob Clark 	 */
642311720aSRob Clark 	fctx->last_fence = 0xffffff00;
652311720aSRob Clark 	fctx->completed_fence = fctx->last_fence;
662311720aSRob Clark 	*fctx->fenceptr = fctx->last_fence;
672311720aSRob Clark 
68f8b8487cSRob Clark 	hrtimer_init(&fctx->deadline_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
69f8b8487cSRob Clark 	fctx->deadline_timer.function = deadline_timer;
70f8b8487cSRob Clark 
71f8b8487cSRob Clark 	kthread_init_work(&fctx->deadline_work, deadline_work);
72f8b8487cSRob Clark 
73f8b8487cSRob Clark 	fctx->next_deadline = ktime_get();
74f8b8487cSRob Clark 
75ca762a8aSRob Clark 	return fctx;
76fde5de6cSRob Clark }
77fde5de6cSRob Clark 
/* Free a fence context previously allocated with msm_fence_context_alloc(). */
void msm_fence_context_free(struct msm_fence_context *fctx)
{
	kfree(fctx);
}
82ca762a8aSRob Clark 
msm_fence_completed(struct msm_fence_context * fctx,uint32_t fence)8395d1deb0SRob Clark bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence)
84ca762a8aSRob Clark {
85da3d378dSRob Clark 	/*
86da3d378dSRob Clark 	 * Note: Check completed_fence first, as fenceptr is in a write-combine
87da3d378dSRob Clark 	 * mapping, so it will be more expensive to read.
88da3d378dSRob Clark 	 */
89da3d378dSRob Clark 	return (int32_t)(fctx->completed_fence - fence) >= 0 ||
90da3d378dSRob Clark 		(int32_t)(*fctx->fenceptr - fence) >= 0;
91ca762a8aSRob Clark }
92ca762a8aSRob Clark 
933c7a5221SRob Clark /* called from irq handler and workqueue (in recover path) */
msm_update_fence(struct msm_fence_context * fctx,uint32_t fence)94ca762a8aSRob Clark void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
95fde5de6cSRob Clark {
963c7a5221SRob Clark 	unsigned long flags;
973c7a5221SRob Clark 
983c7a5221SRob Clark 	spin_lock_irqsave(&fctx->spinlock, flags);
992311720aSRob Clark 	if (fence_after(fence, fctx->completed_fence))
1002311720aSRob Clark 		fctx->completed_fence = fence;
101f8b8487cSRob Clark 	if (msm_fence_completed(fctx, fctx->next_deadline_fence))
102f8b8487cSRob Clark 		hrtimer_cancel(&fctx->deadline_timer);
1033c7a5221SRob Clark 	spin_unlock_irqrestore(&fctx->spinlock, flags);
104fde5de6cSRob Clark }
105b6295f9aSRob Clark 
/* A fence on an msm timeline: the base dma_fence plus its owning context. */
struct msm_fence {
	struct dma_fence base;
	struct msm_fence_context *fctx;	/* context this fence belongs to */
};
110b6295f9aSRob Clark 
/* Downcast a dma_fence known to be embedded in a struct msm_fence. */
static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
{
	return container_of(fence, struct msm_fence, base);
}
115b6295f9aSRob Clark 
/* dma_fence_ops::get_driver_name */
static const char *msm_fence_get_driver_name(struct dma_fence *fence)
{
	return "msm";
}
120b6295f9aSRob Clark 
msm_fence_get_timeline_name(struct dma_fence * fence)121f54d1867SChris Wilson static const char *msm_fence_get_timeline_name(struct dma_fence *fence)
122b6295f9aSRob Clark {
123b6295f9aSRob Clark 	struct msm_fence *f = to_msm_fence(fence);
124b6295f9aSRob Clark 	return f->fctx->name;
125b6295f9aSRob Clark }
126b6295f9aSRob Clark 
msm_fence_signaled(struct dma_fence * fence)127f54d1867SChris Wilson static bool msm_fence_signaled(struct dma_fence *fence)
128b6295f9aSRob Clark {
129b6295f9aSRob Clark 	struct msm_fence *f = to_msm_fence(fence);
13095d1deb0SRob Clark 	return msm_fence_completed(f->fctx, f->base.seqno);
131b6295f9aSRob Clark }
132b6295f9aSRob Clark 
/*
 * dma_fence_ops::set_deadline - a waiter wants this fence signaled by
 * @deadline, so arrange for a devfreq boost if the GPU looks like it
 * won't make it in time.
 */
static void msm_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
{
	struct msm_fence *f = to_msm_fence(fence);
	struct msm_fence_context *fctx = f->fctx;
	unsigned long flags;
	ktime_t now;

	spin_lock_irqsave(&fctx->spinlock, flags);
	now = ktime_get();

	/*
	 * Only (re)arm if the currently tracked deadline has already passed,
	 * or the new deadline is sooner than the tracked one:
	 */
	if (ktime_after(now, fctx->next_deadline) ||
			ktime_before(deadline, fctx->next_deadline)) {
		fctx->next_deadline = deadline;
		/* Track the highest seqno any deadline was requested for: */
		fctx->next_deadline_fence =
			max(fctx->next_deadline_fence, (uint32_t)fence->seqno);

		/*
		 * Set timer to trigger boost 3ms before deadline, or
		 * if we are already less than 3ms before the deadline
		 * schedule boost work immediately.
		 */
		deadline = ktime_sub(deadline, ms_to_ktime(3));

		if (ktime_after(now, deadline)) {
			kthread_queue_work(fctx2gpu(fctx)->worker,
					&fctx->deadline_work);
		} else {
			hrtimer_start(&fctx->deadline_timer, deadline,
					HRTIMER_MODE_ABS);
		}
	}

	spin_unlock_irqrestore(&fctx->spinlock, flags);
}
167f8b8487cSRob Clark 
/* dma_fence ops for fences on msm timelines. */
static const struct dma_fence_ops msm_fence_ops = {
	.get_driver_name = msm_fence_get_driver_name,
	.get_timeline_name = msm_fence_get_timeline_name,
	.signaled = msm_fence_signaled,
	.set_deadline = msm_fence_set_deadline,
};
174b6295f9aSRob Clark 
175f54d1867SChris Wilson struct dma_fence *
msm_fence_alloc(void)176f94e6a51SRob Clark msm_fence_alloc(void)
177b6295f9aSRob Clark {
178b6295f9aSRob Clark 	struct msm_fence *f;
179b6295f9aSRob Clark 
180b6295f9aSRob Clark 	f = kzalloc(sizeof(*f), GFP_KERNEL);
181b6295f9aSRob Clark 	if (!f)
182b6295f9aSRob Clark 		return ERR_PTR(-ENOMEM);
183b6295f9aSRob Clark 
184f94e6a51SRob Clark 	return &f->base;
185f94e6a51SRob Clark }
186f94e6a51SRob Clark 
187f94e6a51SRob Clark void
msm_fence_init(struct dma_fence * fence,struct msm_fence_context * fctx)188f94e6a51SRob Clark msm_fence_init(struct dma_fence *fence, struct msm_fence_context *fctx)
189f94e6a51SRob Clark {
190f94e6a51SRob Clark 	struct msm_fence *f = to_msm_fence(fence);
191f94e6a51SRob Clark 
192b6295f9aSRob Clark 	f->fctx = fctx;
193b6295f9aSRob Clark 
194*1cd0787fSRob Clark 	/*
195*1cd0787fSRob Clark 	 * Until this point, the fence was just some pre-allocated memory,
196*1cd0787fSRob Clark 	 * no-one should have taken a reference to it yet.
197*1cd0787fSRob Clark 	 */
198*1cd0787fSRob Clark 	WARN_ON(kref_read(&fence->refcount));
199*1cd0787fSRob Clark 
200f54d1867SChris Wilson 	dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
201b6295f9aSRob Clark 		       fctx->context, ++fctx->last_fence);
202b6295f9aSRob Clark }
203