/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>
#include <linux/dma-fence-array.h>

#include <drm/drm_syncobj.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"

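/* Pushes the hangcheck timer out to 100ms from now. Called on job
 * submission and whenever the hardware makes progress, so the timer
 * only fires after 100ms without forward progress.
 */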
static void
vc4_queue_hangcheck(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mod_timer(&vc4->hangcheck.timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}

struct vc4_hang_state {
	struct drm_vc4_get_hang_state user_state;

	u32 bo_count;
	struct drm_gem_object **bo;
};

static void
vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
	unsigned int i;

	for (i = 0; i < state->user_state.bo_count; i++)
		drm_gem_object_put(state->bo[i]);

	kfree(state);
}

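/* Returns the most recently captured hang state to userspace.
 *
 * Userspace typically calls this twice: once with a too-small bo_count
 * to learn the required array size, and again with a large enough
 * array to receive a GEM handle, paddr, and size for each captured BO
 * along with the saved V3D register state. The kernel drops its
 * reference to the hang state on the second (successful) call.
 */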
int
vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_hang_state *get_state = data;
	struct drm_vc4_get_hang_state_bo *bo_state;
	struct vc4_hang_state *kernel_state;
	struct drm_vc4_get_hang_state *state;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	u32 i;
	int ret = 0;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (!vc4->v3d) {
		DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	kernel_state = vc4->hang_state;
	if (!kernel_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return -ENOENT;
	}
	state = &kernel_state->user_state;

	/* If the user's array isn't big enough, just return the
	 * required array size.
	 */
	if (get_state->bo_count < state->bo_count) {
		get_state->bo_count = state->bo_count;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return 0;
	}

	vc4->hang_state = NULL;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
	state->bo = get_state->bo;
	memcpy(get_state, state, sizeof(*state));

	bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
	if (!bo_state) {
		ret = -ENOMEM;
		goto err_free;
	}

	for (i = 0; i < state->bo_count; i++) {
		struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
		u32 handle;

		ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
					    &handle);

		if (ret) {
			state->bo_count = i;
			goto err_delete_handle;
		}
		bo_state[i].handle = handle;
		bo_state[i].paddr = vc4_bo->base.dma_addr;
		bo_state[i].size = vc4_bo->base.base.size;
	}

	if (copy_to_user(u64_to_user_ptr(get_state->bo),
			 bo_state,
			 state->bo_count * sizeof(*bo_state)))
		ret = -EFAULT;

err_delete_handle:
	if (ret) {
		for (i = 0; i < state->bo_count; i++)
			drm_gem_handle_delete(file_priv, bo_state[i].handle);
	}

err_free:
	vc4_free_hang_state(dev, kernel_state);
	kfree(bo_state);

	return ret;
}

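/* Captures the state of a GPU hang before the reset, for later
 * retrieval via the VC4_GET_HANG_STATE ioctl. Takes references on
 * every BO of the hung bin and render jobs (so the buffers survive
 * the reset and remain unpurged), then records the V3D control-list
 * and debug registers.
 */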
static void
vc4_save_hang_state(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_hang_state *state;
	struct vc4_hang_state *kernel_state;
	struct vc4_exec_info *exec[2];
	struct vc4_bo *bo;
	unsigned long irqflags;
	unsigned int i, j, k, unref_list_count;

	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
	if (!kernel_state)
		return;

	state = &kernel_state->user_state;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	exec[0] = vc4_first_bin_job(vc4);
	exec[1] = vc4_first_render_job(vc4);
	if (!exec[0] && !exec[1]) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	/* Get the bos from both binner and renderer into hang state. */
	state->bo_count = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		unref_list_count = 0;
		list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
			unref_list_count++;
		state->bo_count += exec[i]->bo_count + unref_list_count;
	}

	kernel_state->bo = kcalloc(state->bo_count,
				   sizeof(*kernel_state->bo), GFP_ATOMIC);

	if (!kernel_state->bo) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	k = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		for (j = 0; j < exec[i]->bo_count; j++) {
			bo = to_vc4_bo(exec[i]->bo[j]);

			/* Retain BOs just in case they were marked purgeable.
			 * This prevents the BO from being purged before
			 * someone had a chance to dump the hang state.
			 */
			WARN_ON(!refcount_read(&bo->usecnt));
			refcount_inc(&bo->usecnt);
			drm_gem_object_get(exec[i]->bo[j]);
			kernel_state->bo[k++] = exec[i]->bo[j];
		}

		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
			/* No need to retain BOs coming from the ->unref_list
			 * because they are naturally unpurgeable.
			 */
			drm_gem_object_get(&bo->base.base);
			kernel_state->bo[k++] = &bo->base.base;
		}
	}

	WARN_ON_ONCE(k != state->bo_count);

	if (exec[0])
		state->start_bin = exec[0]->ct0ca;
	if (exec[1])
		state->start_render = exec[1]->ct1ca;

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	state->ct0ca = V3D_READ(V3D_CTNCA(0));
	state->ct0ea = V3D_READ(V3D_CTNEA(0));

	state->ct1ca = V3D_READ(V3D_CTNCA(1));
	state->ct1ea = V3D_READ(V3D_CTNEA(1));

	state->ct0cs = V3D_READ(V3D_CTNCS(0));
	state->ct1cs = V3D_READ(V3D_CTNCS(1));

	state->ct0ra0 = V3D_READ(V3D_CT00RA0);
	state->ct1ra0 = V3D_READ(V3D_CT01RA0);

	state->bpca = V3D_READ(V3D_BPCA);
	state->bpcs = V3D_READ(V3D_BPCS);
	state->bpoa = V3D_READ(V3D_BPOA);
	state->bpos = V3D_READ(V3D_BPOS);

	state->vpmbase = V3D_READ(V3D_VPMBASE);

	state->dbge = V3D_READ(V3D_DBGE);
	state->fdbgo = V3D_READ(V3D_FDBGO);
	state->fdbgb = V3D_READ(V3D_FDBGB);
	state->fdbgr = V3D_READ(V3D_FDBGR);
	state->fdbgs = V3D_READ(V3D_FDBGS);
	state->errstat = V3D_READ(V3D_ERRSTAT);

	/* We need to turn purgeable BOs into unpurgeable ones so that
	 * userspace has a chance to dump the hang state before the kernel
	 * decides to purge those BOs.
	 * Note that BO consistency at dump time cannot be guaranteed. For
	 * example, if the owner of these BOs decides to re-use them or mark
	 * them purgeable again there's nothing we can do to prevent it.
	 */
	for (i = 0; i < kernel_state->user_state.bo_count; i++) {
		struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);

		if (bo->madv == __VC4_MADV_NOTSUPP)
			continue;

		mutex_lock(&bo->madv_lock);
		if (!WARN_ON(bo->madv == __VC4_MADV_PURGED))
			bo->madv = VC4_MADV_WILLNEED;
		refcount_dec(&bo->usecnt);
		mutex_unlock(&bo->madv_lock);
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (vc4->hang_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_free_hang_state(dev, kernel_state);
	} else {
		vc4->hang_state = kernel_state;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	}
}

static void
vc4_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	DRM_INFO("Resetting GPU.\n");

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount) {
		/* Power the device off and back on by dropping the
		 * reference on runtime PM.
		 */
		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	vc4_irq_reset(dev);

	/* Rearm the hangcheck -- another job might have been waiting
	 * for our hung one to get kicked off, and vc4_irq_reset()
	 * would have started it.
	 */
	vc4_queue_hangcheck(dev);
}

static void
vc4_reset_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, hangcheck.reset_work);

	vc4_save_hang_state(&vc4->base);

	vc4_reset(&vc4->base);
}

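/* Timer callback that decides whether the GPU is hung. It samples the
 * control-list current-address registers for the bin and render
 * threads; if neither has advanced since the last check while jobs are
 * still queued, it schedules the reset work.
 */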
static void
vc4_hangcheck_elapsed(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
	struct drm_device *dev = &vc4->base;
	uint32_t ct0ca, ct1ca;
	unsigned long irqflags;
	struct vc4_exec_info *bin_exec, *render_exec;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	bin_exec = vc4_first_bin_job(vc4);
	render_exec = vc4_first_render_job(vc4);

	/* If idle, we can stop watching for hangs. */
	if (!bin_exec && !render_exec) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	ct0ca = V3D_READ(V3D_CTNCA(0));
	ct1ca = V3D_READ(V3D_CTNCA(1));

	/* If we've made any progress in execution, rearm the timer
	 * and wait.
	 */
	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
		if (bin_exec)
			bin_exec->last_ct0ca = ct0ca;
		if (render_exec)
			render_exec->last_ct1ca = ct1ca;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_queue_hangcheck(dev);
		return;
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* We've gone too long with no progress, reset. This has to
	 * be done from a work struct, since resetting can sleep and
	 * this timer hook isn't allowed to.
	 */
	schedule_work(&vc4->hangcheck.reset_work);
}

static void
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_WRITE(V3D_CTNCA(thread), start);
	V3D_WRITE(V3D_CTNEA(thread), end);
}

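/* Blocks until the GPU has signalled completion of the given seqno.
 * Returns 0 on success, -ETIME if timeout_ns expires (a timeout of
 * ~0ull means wait forever), or -ERESTARTSYS if an interruptible wait
 * was broken by a signal.
 */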
int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
		   bool interruptible)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long timeout_expire;
	DEFINE_WAIT(wait);

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (vc4->finished_seqno >= seqno)
		return 0;

	if (timeout_ns == 0)
		return -ETIME;

	timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);

	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
	for (;;) {
		prepare_to_wait(&vc4->job_wait_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE :
				TASK_UNINTERRUPTIBLE);

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (vc4->finished_seqno >= seqno)
			break;

		if (timeout_ns != ~0ull) {
			if (time_after_eq(jiffies, timeout_expire)) {
				ret = -ETIME;
				break;
			}
			schedule_timeout(timeout_expire - jiffies);
		} else {
			schedule();
		}
	}

	finish_wait(&vc4->job_wait_queue, &wait);
	trace_vc4_wait_for_seqno_end(dev, seqno);

	return ret;
}

static void
vc4_flush_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Flush the GPU L2 caches. These caches sit on top of system
	 * L3 (the 128kb or so shared with the CPU), and are
	 * non-allocating in the L3.
	 */
	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

static void
vc4_flush_texture_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
}

/* Sets the registers for the next job to actually be executed in
 * the hardware.
 *
 * The job_lock should be held during this.
 */
void
vc4_submit_next_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

again:
	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

	vc4_flush_caches(dev);

	/* Only start the perfmon if it was not already started by a previous
	 * job.
	 */
	if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
		vc4_perfmon_start(vc4, exec->perfmon);

	/* Either put the job in the binner if it uses the binner, or
	 * immediately move it to the to-be-rendered queue.
	 */
	if (exec->ct0ca != exec->ct0ea) {
		trace_vc4_submit_cl(dev, false, exec->seqno, exec->ct0ca,
				    exec->ct0ea);
		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
	} else {
		struct vc4_exec_info *next;

		vc4_move_job_to_render(dev, exec);
		next = vc4_first_bin_job(vc4);

		/* We can't start the next bin job if the previous job had a
		 * different perfmon instance attached to it. The same goes
		 * if one of them had a perfmon attached to it and the other
		 * one doesn't.
		 */
		if (next && next->perfmon == exec->perfmon)
			goto again;
	}
}

void
vc4_submit_next_render_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_render_job(vc4);

	if (!exec)
		return;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	/* A previous RCL may have written to one of our textures, and
	 * our full cache flush at bin time may have occurred before
	 * that RCL completed. Flush the texture cache now, but not
	 * the instructions or uniforms (since we don't write those
	 * from an RCL).
	 */
	vc4_flush_texture_caches(dev);

	trace_vc4_submit_cl(dev, true, exec->seqno, exec->ct1ca, exec->ct1ea);
	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}

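/* Moves a bin job that has finished binning (or never needed the
 * binner) onto the render queue, kicking the render engine if it was
 * idle. Called with job_lock held.
 */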
void
vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool was_empty = list_empty(&vc4->render_job_list);

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	list_move_tail(&exec->head, &vc4->render_job_list);
	if (was_empty)
		vc4_submit_next_render_job(dev);
}

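/* Stamps each BO referenced by the job with the job's seqno and
 * attaches the job's fence to the BOs' reservation objects: as a read
 * fence on every BO in the submitted list, and additionally as a
 * write fence on the BOs the render job writes to.
 */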
static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
	struct vc4_bo *bo;
	unsigned i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(exec->bo[i]);
		bo->seqno = seqno;

		dma_resv_add_fence(bo->base.base.resv, exec->fence,
				   DMA_RESV_USAGE_READ);
	}

	list_for_each_entry(bo, &exec->unref_list, unref_head) {
		bo->seqno = seqno;
	}

	for (i = 0; i < exec->rcl_write_bo_count; i++) {
		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
		bo->write_seqno = seqno;

		dma_resv_add_fence(bo->base.base.resv, exec->fence,
				   DMA_RESV_USAGE_WRITE);
	}
}

static void
vc4_unlock_bo_reservations(struct drm_device *dev,
			   struct vc4_exec_info *exec,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < exec->bo_count; i++)
		dma_resv_unlock(exec->bo[i]->resv);

	ww_acquire_fini(acquire_ctx);
}

/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list). They're entirely private
 * to vc4, so we don't attach dma-buf fences to them.
 */
static int
vc4_lock_bo_reservations(struct drm_device *dev,
			 struct vc4_exec_info *exec,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int contended_lock = -1;
	int i, ret;
	struct drm_gem_object *bo;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended_lock != -1) {
		bo = exec->bo[contended_lock];
		ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < exec->bo_count; i++) {
		if (i == contended_lock)
			continue;

		bo = exec->bo[i];

		ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++) {
				bo = exec->bo[j];
				dma_resv_unlock(bo->resv);
			}

			if (contended_lock != -1 && contended_lock >= i) {
				bo = exec->bo[contended_lock];

				dma_resv_unlock(bo->resv);
			}

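			/* -EDEADLK means the ww_mutex core detected a
			 * potential deadlock against another acquire
			 * context. All our locks were dropped above, so
			 * retry, taking the contended lock first via the
			 * slow path.
			 */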
			if (ret == -EDEADLK) {
				contended_lock = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	for (i = 0; i < exec->bo_count; i++) {
		bo = exec->bo[i];

		ret = dma_resv_reserve_fences(bo->resv, 1);
		if (ret) {
			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
			return ret;
		}
	}

	return 0;
}

/* Queues a struct vc4_exec_info for execution. If no job is
 * currently executing, then submits it.
 *
 * Unlike most GPUs, our hardware only handles one command list at a
 * time. To queue multiple jobs at once, we'd need to edit the
 * previous command list to have a jump to the new one at the end, and
 * then bump the end address. That's a change for a later date,
 * though.
 */
static int
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
		 struct ww_acquire_ctx *acquire_ctx,
		 struct drm_syncobj *out_sync)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *renderjob;
	uint64_t seqno;
	unsigned long irqflags;
	struct vc4_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	fence->dev = dev;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	seqno = ++vc4->emit_seqno;
	exec->seqno = seqno;

	dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
		       vc4->dma_fence_context, exec->seqno);
	fence->seqno = exec->seqno;
	exec->fence = &fence->base;

	if (out_sync)
		drm_syncobj_replace_fence(out_sync, exec->fence);

	vc4_update_bo_seqnos(exec, seqno);

	vc4_unlock_bo_reservations(dev, exec, acquire_ctx);

	list_add_tail(&exec->head, &vc4->bin_job_list);

	/* If no bin job was executing and if the render job (if any) has the
	 * same perfmon as our job attached to it (or if both jobs don't have
	 * perfmon activated), then kick ours off. Otherwise, it'll get
	 * started when the previous job's flush/render done interrupt occurs.
	 */
	renderjob = vc4_first_render_job(vc4);
	if (vc4_first_bin_job(vc4) == exec &&
	    (!renderjob || renderjob->perfmon == exec->perfmon)) {
		vc4_submit_next_bin_job(dev);
		vc4_queue_hangcheck(dev);
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return 0;
}

/**
 * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list. This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 */
static int
vc4_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs to validate\n");
		return -EINVAL;
	}

	ret = drm_gem_objects_lookup(file_priv, u64_to_user_ptr(args->bo_handles),
				     exec->bo_count, &exec->bo);

	if (ret)
		goto fail_put_bo;

	for (i = 0; i < exec->bo_count; i++) {
		ret = vc4_bo_inc_usecnt(to_vc4_bo(exec->bo[i]));
		if (ret)
			goto fail_dec_usecnt;
	}

	return 0;

fail_dec_usecnt:
	/* Decrease usecnt on acquired objects.
	 * We cannot rely on vc4_complete_exec() to release resources here,
	 * because vc4_complete_exec() has no information about which BO has
	 * had its ->usecnt incremented.
	 * To make things easier we just free everything explicitly and set
	 * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
	 * step.
	 */
	for (i--; i >= 0; i--)
		vc4_bo_dec_usecnt(to_vc4_bo(exec->bo[i]));

fail_put_bo:
	/* Release any reference to acquired objects. */
	for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
		drm_gem_object_put(exec->bo[i]);

	kvfree(exec->bo);
	exec->bo = NULL;
	return ret;
}

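/* Copies the binner command list, shader records, and uniforms in from
 * userspace, validates them, and packs the validated contents into a
 * single contiguous "exec" BO (laid out as bin CL, then shader recs,
 * then uniforms) that the binner will execute from.
 */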
static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	void *temp = NULL;
	void *bin;
	int ret = 0;
	uint32_t bin_offset = 0;
	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
					     16);
	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
	uint32_t exec_size = uniforms_offset + args->uniforms_size;
	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
					  args->shader_rec_count);
	struct vc4_bo *bo;

	if (shader_rec_offset < args->bin_cl_size ||
	    uniforms_offset < shader_rec_offset ||
	    exec_size < uniforms_offset ||
	    args->shader_rec_count >= (UINT_MAX /
				       sizeof(struct vc4_shader_state)) ||
	    temp_size < exec_size) {
		DRM_DEBUG("overflow in exec arguments\n");
		ret = -EINVAL;
		goto fail;
	}

	/* Allocate space where we'll store the copied in user command lists
	 * and shader records.
	 *
	 * We don't just copy directly into the BOs because we need to
	 * read the contents back for validation, and I think the
	 * bo->vaddr is uncached access.
	 */
	temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
	if (!temp) {
		DRM_ERROR("Failed to allocate storage for copying "
			  "in bin/render CLs.\n");
		ret = -ENOMEM;
		goto fail;
	}
	bin = temp + bin_offset;
	exec->shader_rec_u = temp + shader_rec_offset;
	exec->uniforms_u = temp + uniforms_offset;
	exec->shader_state = temp + exec_size;
	exec->shader_state_size = args->shader_rec_count;

	if (copy_from_user(bin,
			   u64_to_user_ptr(args->bin_cl),
			   args->bin_cl_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->shader_rec_u,
			   u64_to_user_ptr(args->shader_rec),
			   args->shader_rec_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->uniforms_u,
			   u64_to_user_ptr(args->uniforms),
			   args->uniforms_size)) {
		ret = -EFAULT;
		goto fail;
	}

	bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate BO for binning\n");
		ret = PTR_ERR(bo);
		goto fail;
	}
	exec->exec_bo = &bo->base;

	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
		      &exec->unref_list);

	exec->ct0ca = exec->exec_bo->dma_addr + bin_offset;

	exec->bin_u = bin;

	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
	exec->shader_rec_p = exec->exec_bo->dma_addr + shader_rec_offset;
	exec->shader_rec_size = args->shader_rec_size;

	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
	exec->uniforms_p = exec->exec_bo->dma_addr + uniforms_offset;
	exec->uniforms_size = args->uniforms_size;

	ret = vc4_validate_bin_cl(dev,
				  exec->exec_bo->vaddr + bin_offset,
				  bin,
				  exec);
	if (ret)
		goto fail;

	ret = vc4_validate_shader_recs(dev, exec);
	if (ret)
		goto fail;

	if (exec->found_tile_binning_mode_config_packet) {
		ret = vc4_v3d_bin_bo_get(vc4, &exec->bin_bo_used);
		if (ret)
			goto fail;
	}

	/* Block waiting on any previous rendering into the CS's VBO,
	 * IB, or textures, so that pixels are actually written by the
	 * time we try to read them.
	 */
	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);

fail:
	kvfree(temp);
	return ret;
}

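/* Tears down a job once it has completed (or been force-completed by a
 * GPU reset): signals its fence if the IRQ handler hasn't already,
 * drops the BO references and usecnts taken at submit time, returns
 * the job's bin slots and binner BO reference, and releases the
 * perfmon and runtime-PM references.
 */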
static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	unsigned i;

	/* If we got force-completed because of GPU reset rather than
	 * through our IRQ handler, signal the fence now.
	 */
	if (exec->fence) {
		dma_fence_signal(exec->fence);
		dma_fence_put(exec->fence);
	}

	if (exec->bo) {
		for (i = 0; i < exec->bo_count; i++) {
			struct vc4_bo *bo = to_vc4_bo(exec->bo[i]);

			vc4_bo_dec_usecnt(bo);
			drm_gem_object_put(exec->bo[i]);
		}
		kvfree(exec->bo);
	}

	while (!list_empty(&exec->unref_list)) {
		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
						     struct vc4_bo, unref_head);
		list_del(&bo->unref_head);
		drm_gem_object_put(&bo->base.base);
	}

	/* Free up the allocation of any bin slots we used. */
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	vc4->bin_alloc_used &= ~exec->bin_slots;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Release the reference on the binner BO if needed. */
	if (exec->bin_bo_used)
		vc4_v3d_bin_bo_put(vc4);

	/* Release the reference we had on the perf monitor. */
	vc4_perfmon_put(exec->perfmon);

	vc4_v3d_pm_put(vc4);

	kfree(exec);
}

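/* Drains vc4->job_done_list, freeing the resources of each completed
 * job, and schedules any seqno callbacks whose seqno has now been
 * reached.
 */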
void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
	unsigned long irqflags;
	struct vc4_seqno_cb *cb, *cb_temp;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	while (!list_empty(&vc4->job_done_list)) {
		struct vc4_exec_info *exec =
			list_first_entry(&vc4->job_done_list,
					 struct vc4_exec_info, head);
		list_del(&exec->head);

		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_complete_exec(&vc4->base, exec);
		spin_lock_irqsave(&vc4->job_lock, irqflags);
	}

	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
		if (cb->seqno <= vc4->finished_seqno) {
			list_del_init(&cb->work.entry);
			schedule_work(&cb->work);
		}
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}

static void vc4_seqno_cb_work(struct work_struct *work)
{
	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);

	cb->func(cb);
}

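/* Registers a callback to be run (from a workqueue) once the given
 * seqno has been reached. If it has already been reached, the
 * callback is scheduled immediately.
 */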
vc4_queue_seqno_cb(struct drm_device * dev,struct vc4_seqno_cb * cb,uint64_t seqno,void (* func)(struct vc4_seqno_cb * cb))1005b501baccSEric Anholt int vc4_queue_seqno_cb(struct drm_device *dev,
1006b501baccSEric Anholt struct vc4_seqno_cb *cb, uint64_t seqno,
1007b501baccSEric Anholt void (*func)(struct vc4_seqno_cb *cb))
1008b501baccSEric Anholt {
1009b501baccSEric Anholt struct vc4_dev *vc4 = to_vc4_dev(dev);
1010b501baccSEric Anholt unsigned long irqflags;
1011b501baccSEric Anholt
101230f8c74cSMaxime Ripard if (WARN_ON_ONCE(vc4->is_vc5))
101330f8c74cSMaxime Ripard return -ENODEV;
101430f8c74cSMaxime Ripard
1015b501baccSEric Anholt cb->func = func;
1016b501baccSEric Anholt INIT_WORK(&cb->work, vc4_seqno_cb_work);
1017b501baccSEric Anholt
1018b501baccSEric Anholt spin_lock_irqsave(&vc4->job_lock, irqflags);
1019b501baccSEric Anholt if (seqno > vc4->finished_seqno) {
1020b501baccSEric Anholt cb->seqno = seqno;
1021b501baccSEric Anholt list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
1022b501baccSEric Anholt } else {
1023b501baccSEric Anholt schedule_work(&cb->work);
1024b501baccSEric Anholt }
1025b501baccSEric Anholt spin_unlock_irqrestore(&vc4->job_lock, irqflags);
1026b501baccSEric Anholt
1027439dde0aSBernard Zhao return 0;
1028b501baccSEric Anholt }
1029b501baccSEric Anholt
1030d5b1a78aSEric Anholt /* Scheduled when any job has been completed, this walks the list of
1031d5b1a78aSEric Anholt * jobs that had completed and unrefs their BOs and frees their exec
1032d5b1a78aSEric Anholt * structs.
1033d5b1a78aSEric Anholt */
1034d5b1a78aSEric Anholt static void
vc4_job_done_work(struct work_struct * work)1035d5b1a78aSEric Anholt vc4_job_done_work(struct work_struct *work)
1036d5b1a78aSEric Anholt {
1037d5b1a78aSEric Anholt struct vc4_dev *vc4 =
1038d5b1a78aSEric Anholt container_of(work, struct vc4_dev, job_done_work);
1039d5b1a78aSEric Anholt
1040d5b1a78aSEric Anholt vc4_job_handle_completed(vc4);
1041d5b1a78aSEric Anholt }
1042d5b1a78aSEric Anholt
1043d5b1a78aSEric Anholt static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device * dev,uint64_t seqno,uint64_t * timeout_ns)1044d5b1a78aSEric Anholt vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
1045d5b1a78aSEric Anholt uint64_t seqno,
1046d5b1a78aSEric Anholt uint64_t *timeout_ns)
1047d5b1a78aSEric Anholt {
1048d5b1a78aSEric Anholt unsigned long start = jiffies;
1049d5b1a78aSEric Anholt int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);
1050d5b1a78aSEric Anholt
1051d5b1a78aSEric Anholt if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
1052d5b1a78aSEric Anholt uint64_t delta = jiffies_to_nsecs(jiffies - start);
1053d5b1a78aSEric Anholt
1054d5b1a78aSEric Anholt if (*timeout_ns >= delta)
1055d5b1a78aSEric Anholt *timeout_ns -= delta;
1056d5b1a78aSEric Anholt }
1057d5b1a78aSEric Anholt
1058d5b1a78aSEric Anholt return ret;
1059d5b1a78aSEric Anholt }
1060d5b1a78aSEric Anholt
1061d5b1a78aSEric Anholt int
vc4_wait_seqno_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)1062d5b1a78aSEric Anholt vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
1063d5b1a78aSEric Anholt struct drm_file *file_priv)
1064d5b1a78aSEric Anholt {
106530f8c74cSMaxime Ripard struct vc4_dev *vc4 = to_vc4_dev(dev);
1066d5b1a78aSEric Anholt struct drm_vc4_wait_seqno *args = data;
1067d5b1a78aSEric Anholt
106830f8c74cSMaxime Ripard if (WARN_ON_ONCE(vc4->is_vc5))
106930f8c74cSMaxime Ripard return -ENODEV;
107030f8c74cSMaxime Ripard
1071d5b1a78aSEric Anholt return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
1072d5b1a78aSEric Anholt &args->timeout_ns);
1073d5b1a78aSEric Anholt }
1074d5b1a78aSEric Anholt
1075d5b1a78aSEric Anholt int
vc4_wait_bo_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)1076d5b1a78aSEric Anholt vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
1077d5b1a78aSEric Anholt struct drm_file *file_priv)
1078d5b1a78aSEric Anholt {
107930f8c74cSMaxime Ripard struct vc4_dev *vc4 = to_vc4_dev(dev);
1080d5b1a78aSEric Anholt int ret;
1081d5b1a78aSEric Anholt struct drm_vc4_wait_bo *args = data;
1082d5b1a78aSEric Anholt struct drm_gem_object *gem_obj;
1083d5b1a78aSEric Anholt struct vc4_bo *bo;
1084d5b1a78aSEric Anholt
108530f8c74cSMaxime Ripard if (WARN_ON_ONCE(vc4->is_vc5))
108630f8c74cSMaxime Ripard return -ENODEV;
108730f8c74cSMaxime Ripard
1088e0015236SEric Anholt if (args->pad != 0)
1089e0015236SEric Anholt return -EINVAL;
1090e0015236SEric Anholt
1091a8ad0bd8SChris Wilson gem_obj = drm_gem_object_lookup(file_priv, args->handle);
1092d5b1a78aSEric Anholt if (!gem_obj) {
1093fb95992aSEric Anholt DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
1094d5b1a78aSEric Anholt return -EINVAL;
1095d5b1a78aSEric Anholt }
1096d5b1a78aSEric Anholt bo = to_vc4_bo(gem_obj);
1097d5b1a78aSEric Anholt
1098d5b1a78aSEric Anholt ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
1099d5b1a78aSEric Anholt &args->timeout_ns);
1100d5b1a78aSEric Anholt
1101f7a8cd30SEmil Velikov drm_gem_object_put(gem_obj);
1102d5b1a78aSEric Anholt return ret;
1103d5b1a78aSEric Anholt }
1104d5b1a78aSEric Anholt
1105d5b1a78aSEric Anholt /**
110672f793f1SEric Anholt * vc4_submit_cl_ioctl() - Submits a job (frame) to the VC4.
110772f793f1SEric Anholt * @dev: DRM device
110872f793f1SEric Anholt * @data: ioctl argument
110972f793f1SEric Anholt * @file_priv: DRM file for this fd
1110d5b1a78aSEric Anholt *
111172f793f1SEric Anholt * This is the main entrypoint for userspace to submit a 3D frame to
111272f793f1SEric Anholt * the GPU. Userspace provides the binner command list (if
111372f793f1SEric Anholt * applicable), and the kernel sets up the render command list to draw
111472f793f1SEric Anholt * to the framebuffer described in the ioctl, using the command lists
111572f793f1SEric Anholt * that the 3D engine's binner will produce.
1116d5b1a78aSEric Anholt */
1117d5b1a78aSEric Anholt int
vc4_submit_cl_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)1118d5b1a78aSEric Anholt vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
1119d5b1a78aSEric Anholt struct drm_file *file_priv)
1120d5b1a78aSEric Anholt {
1121d5b1a78aSEric Anholt struct vc4_dev *vc4 = to_vc4_dev(dev);
112265101d8cSBoris Brezillon struct vc4_file *vc4file = file_priv->driver_priv;
1123d5b1a78aSEric Anholt struct drm_vc4_submit_cl *args = data;
1124e84fcb95SStefan Schake struct drm_syncobj *out_sync = NULL;
1125d5b1a78aSEric Anholt struct vc4_exec_info *exec;
1126cdec4d36SEric Anholt struct ww_acquire_ctx acquire_ctx;
1127818f5c8fSStefan Schake struct dma_fence *in_fence;
112836cb6253SEric Anholt int ret = 0;
1129d5b1a78aSEric Anholt
1130044feb97SMelissa Wen trace_vc4_submit_cl_ioctl(dev, args->bin_cl_size,
1131044feb97SMelissa Wen args->shader_rec_size,
1132044feb97SMelissa Wen args->bo_handle_count);
1133044feb97SMelissa Wen
113430f8c74cSMaxime Ripard if (WARN_ON_ONCE(vc4->is_vc5))
113530f8c74cSMaxime Ripard return -ENODEV;
113630f8c74cSMaxime Ripard
1137ffc26740SEric Anholt if (!vc4->v3d) {
1138ffc26740SEric Anholt DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n");
1139ffc26740SEric Anholt return -ENODEV;
1140ffc26740SEric Anholt }
1141ffc26740SEric Anholt
11423be8edddSEric Anholt if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
11433be8edddSEric Anholt VC4_SUBMIT_CL_FIXED_RCL_ORDER |
11443be8edddSEric Anholt VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
11453be8edddSEric Anholt VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) {
1146fb95992aSEric Anholt DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
1147d5b1a78aSEric Anholt return -EINVAL;
1148d5b1a78aSEric Anholt }
1149d5b1a78aSEric Anholt
11504c70ac76SEric Anholt if (args->pad2 != 0) {
11514c70ac76SEric Anholt DRM_DEBUG("Invalid pad: 0x%08x\n", args->pad2);
11524c70ac76SEric Anholt return -EINVAL;
11534c70ac76SEric Anholt }
11544c70ac76SEric Anholt
1155d5b1a78aSEric Anholt exec = kzalloc(sizeof(*exec), GFP_KERNEL);
1156d5b1a78aSEric Anholt if (!exec) {
1157d5b1a78aSEric Anholt DRM_ERROR("malloc failure on exec struct\n");
1158d5b1a78aSEric Anholt return -ENOMEM;
1159d5b1a78aSEric Anholt }
116030f8c74cSMaxime Ripard exec->dev = vc4;
1161d5b1a78aSEric Anholt
1162cb74f6eeSEric Anholt ret = vc4_v3d_pm_get(vc4);
1163cb74f6eeSEric Anholt if (ret) {
1164001bdb55SEric Anholt kfree(exec);
1165001bdb55SEric Anholt return ret;
1166001bdb55SEric Anholt }
1167001bdb55SEric Anholt
1168d5b1a78aSEric Anholt exec->args = args;
1169d5b1a78aSEric Anholt INIT_LIST_HEAD(&exec->unref_list);
1170d5b1a78aSEric Anholt
1171d5b1a78aSEric Anholt ret = vc4_cl_lookup_bos(dev, file_priv, exec);
1172d5b1a78aSEric Anholt if (ret)
1173d5b1a78aSEric Anholt goto fail;
1174d5b1a78aSEric Anholt
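	/* If userspace passed a perfmon handle, look it up (taking a
	 * reference) so the job can be counted against it.  Handles not
	 * allocated on this file descriptor are rejected.
	 */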
117565101d8cSBoris Brezillon if (args->perfmonid) {
117665101d8cSBoris Brezillon exec->perfmon = vc4_perfmon_find(vc4file,
117765101d8cSBoris Brezillon args->perfmonid);
117865101d8cSBoris Brezillon if (!exec->perfmon) {
117965101d8cSBoris Brezillon ret = -ENOENT;
118065101d8cSBoris Brezillon goto fail;
118165101d8cSBoris Brezillon }
118265101d8cSBoris Brezillon }
118365101d8cSBoris Brezillon
1184818f5c8fSStefan Schake if (args->in_sync) {
1185818f5c8fSStefan Schake ret = drm_syncobj_find_fence(file_priv, args->in_sync,
1186649fdce2SChunming Zhou 0, 0, &in_fence);
1187818f5c8fSStefan Schake if (ret)
1188818f5c8fSStefan Schake goto fail;
1189818f5c8fSStefan Schake
1190818f5c8fSStefan Schake /* When the fence (or fence array) comes exclusively from our
1191818f5c8fSStefan Schake * own context, we can skip the wait: jobs are executed in the
1192818f5c8fSStefan Schake * order they were submitted through this ioctl, so any fence
1193818f5c8fSStefan Schake * of ours here can only belong to an already-queued prior job.
1194818f5c8fSStefan Schake */
1195818f5c8fSStefan Schake if (!dma_fence_match_context(in_fence,
1196818f5c8fSStefan Schake vc4->dma_fence_context)) {
1197818f5c8fSStefan Schake ret = dma_fence_wait(in_fence, true);
1198818f5c8fSStefan Schake if (ret) {
1199818f5c8fSStefan Schake dma_fence_put(in_fence);
1200818f5c8fSStefan Schake goto fail;
1201818f5c8fSStefan Schake }
1202818f5c8fSStefan Schake }
1203818f5c8fSStefan Schake
1204818f5c8fSStefan Schake dma_fence_put(in_fence);
1205818f5c8fSStefan Schake }
1206818f5c8fSStefan Schake
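	/* Copy in and validate the binner command list, if this job has
	 * one.  A job without a binner CL leaves thread 0's control-list
	 * start/end addresses at 0, so only the render CL will run.
	 */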
1207d5b1a78aSEric Anholt if (exec->args->bin_cl_size != 0) {
1208d5b1a78aSEric Anholt ret = vc4_get_bcl(dev, exec);
1209d5b1a78aSEric Anholt if (ret)
1210d5b1a78aSEric Anholt goto fail;
1211d5b1a78aSEric Anholt } else {
1212d5b1a78aSEric Anholt exec->ct0ca = 0;
1213d5b1a78aSEric Anholt exec->ct0ea = 0;
1214d5b1a78aSEric Anholt }
1215d5b1a78aSEric Anholt
1216d5b1a78aSEric Anholt ret = vc4_get_rcl(dev, exec);
1217d5b1a78aSEric Anholt if (ret)
1218d5b1a78aSEric Anholt goto fail;
1219d5b1a78aSEric Anholt
1220cdec4d36SEric Anholt ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
1221cdec4d36SEric Anholt if (ret)
1222cdec4d36SEric Anholt goto fail;
1223cdec4d36SEric Anholt
1224e84fcb95SStefan Schake if (args->out_sync) {
1225e84fcb95SStefan Schake out_sync = drm_syncobj_find(file_priv, args->out_sync);
1226e84fcb95SStefan Schake if (!out_sync) {
1227e84fcb95SStefan Schake ret = -EINVAL;
1228e84fcb95SStefan Schake goto fail;
1229e84fcb95SStefan Schake }
1230e84fcb95SStefan Schake
1231e84fcb95SStefan Schake /* We replace the fence in out_sync in vc4_queue_submit since
1232e84fcb95SStefan Schake * the render job could execute immediately after that call.
1233e84fcb95SStefan Schake * If it finishes before our ioctl processing resumes, the
1234e84fcb95SStefan Schake * render job fence could already have been freed.
1235e84fcb95SStefan Schake */
1236e84fcb95SStefan Schake }
1237e84fcb95SStefan Schake
1238d5b1a78aSEric Anholt /* Clear the args pointer out of the struct we'll be putting in
1239d5b1a78aSEric Anholt * the queue, since it points at storage on our stack.
1240d5b1a78aSEric Anholt */
1241d5b1a78aSEric Anholt exec->args = NULL;
1242d5b1a78aSEric Anholt
1243e84fcb95SStefan Schake ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync);
1244e84fcb95SStefan Schake
1245e84fcb95SStefan Schake /* The syncobj isn't part of the exec data and we need to free our
1246e84fcb95SStefan Schake * reference even if job submission failed.
1247e84fcb95SStefan Schake */
1248e84fcb95SStefan Schake if (out_sync)
1249e84fcb95SStefan Schake drm_syncobj_put(out_sync);
1250e84fcb95SStefan Schake
1251cdec4d36SEric Anholt if (ret)
1252cdec4d36SEric Anholt goto fail;
1253d5b1a78aSEric Anholt
1254d5b1a78aSEric Anholt /* Return the seqno for our job. */
1255d5b1a78aSEric Anholt args->seqno = vc4->emit_seqno;
1256d5b1a78aSEric Anholt
1257d5b1a78aSEric Anholt return 0;
1258d5b1a78aSEric Anholt
1259d5b1a78aSEric Anholt fail:
126084d7d472SMaxime Ripard vc4_complete_exec(&vc4->base, exec);
1261d5b1a78aSEric Anholt
1262d5b1a78aSEric Anholt return ret;
1263d5b1a78aSEric Anholt }
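
/*
 * Illustrative userspace sketch (not part of the driver): a minimal
 * VC4_SUBMIT_CL call through libdrm, assuming bin_cl, bo_handles and
 * their sizes/counts were built elsewhere; shader records, uniforms
 * and the render-config fields (tile bounds, color/zs buffers) are
 * omitted for brevity.
 *
 *	struct drm_vc4_submit_cl submit = {
 *		.bin_cl = (uintptr_t)bin_cl,
 *		.bin_cl_size = bin_cl_size,
 *		.bo_handles = (uintptr_t)bo_handles,
 *		.bo_handle_count = bo_handle_count,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit) == 0)
 *		wait_for(submit.seqno);	// e.g. via VC4_WAIT_SEQNO
 */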
1264d5b1a78aSEric Anholt
1265171a072bSMaxime Ripard static void vc4_gem_destroy(struct drm_device *dev, void *unused);
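/**
 * vc4_gem_init() - One-time setup of GEM execution state at bind time.
 * @dev: DRM device
 *
 * Sets up the job lists, hangcheck timer and deferred-work items used
 * by the submit/IRQ paths, and registers vc4_gem_destroy() as a
 * DRM-managed release action.
 */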
1266171a072bSMaxime Ripard int vc4_gem_init(struct drm_device *dev)
1267d5b1a78aSEric Anholt {
1268d5b1a78aSEric Anholt struct vc4_dev *vc4 = to_vc4_dev(dev);
1269374146caSMaxime Ripard int ret;
1270d5b1a78aSEric Anholt
127130f8c74cSMaxime Ripard if (WARN_ON_ONCE(vc4->is_vc5))
127230f8c74cSMaxime Ripard return -ENODEV;
127330f8c74cSMaxime Ripard
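	/* All fences we emit share one context, so the in_sync path in
	 * vc4_submit_cl_ioctl() can recognize (and skip waiting on)
	 * fences from our own earlier jobs.
	 */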
1274cdec4d36SEric Anholt vc4->dma_fence_context = dma_fence_context_alloc(1);
1275cdec4d36SEric Anholt
1276ca26d28bSVarad Gautam INIT_LIST_HEAD(&vc4->bin_job_list);
1277ca26d28bSVarad Gautam INIT_LIST_HEAD(&vc4->render_job_list);
1278d5b1a78aSEric Anholt INIT_LIST_HEAD(&vc4->job_done_list);
1279b501baccSEric Anholt INIT_LIST_HEAD(&vc4->seqno_cb_list);
1280d5b1a78aSEric Anholt spin_lock_init(&vc4->job_lock);
1281d5b1a78aSEric Anholt
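	/* The hangcheck timer is (re)armed a short while after each
	 * submission (see vc4_queue_hangcheck()); if V3D appears to have
	 * made no progress by then, reset_work kicks off a GPU reset.
	 */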
1282d5b1a78aSEric Anholt INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
12830078730fSKees Cook timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);
1284d5b1a78aSEric Anholt
1285d5b1a78aSEric Anholt INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
128636cb6253SEric Anholt
1287374146caSMaxime Ripard ret = drmm_mutex_init(dev, &vc4->power_lock);
1288374146caSMaxime Ripard if (ret)
1289374146caSMaxime Ripard return ret;
1290b9f19259SBoris Brezillon
1291b9f19259SBoris Brezillon INIT_LIST_HEAD(&vc4->purgeable.list);
1292374146caSMaxime Ripard
1293374146caSMaxime Ripard ret = drmm_mutex_init(dev, &vc4->purgeable.lock);
1294374146caSMaxime Ripard if (ret)
1295374146caSMaxime Ripard return ret;
1296171a072bSMaxime Ripard
1297171a072bSMaxime Ripard return drmm_add_action_or_reset(dev, vc4_gem_destroy, NULL);
1298d5b1a78aSEric Anholt }
1299d5b1a78aSEric Anholt
1300171a072bSMaxime Ripard static void vc4_gem_destroy(struct drm_device *dev, void *unused)
1301d5b1a78aSEric Anholt {
1302d5b1a78aSEric Anholt struct vc4_dev *vc4 = to_vc4_dev(dev);
1303d5b1a78aSEric Anholt
1304d5b1a78aSEric Anholt /* Waiting for exec to finish would need to be done before
1305d5b1a78aSEric Anholt * unregistering V3D.
1306d5b1a78aSEric Anholt */
1307d5b1a78aSEric Anholt WARN_ON(vc4->emit_seqno != vc4->finished_seqno);
1308d5b1a78aSEric Anholt
1309d5b1a78aSEric Anholt /* V3D should already have disabled its interrupt and cleared
1310d5b1a78aSEric Anholt * the overflow allocation registers. Now free the object.
1311d5b1a78aSEric Anholt */
1312553c942fSEric Anholt if (vc4->bin_bo) {
1313f7a8cd30SEmil Velikov drm_gem_object_put(&vc4->bin_bo->base.base);
1314553c942fSEric Anholt vc4->bin_bo = NULL;
1315d5b1a78aSEric Anholt }
1316d5b1a78aSEric Anholt
131721461365SEric Anholt if (vc4->hang_state)
131821461365SEric Anholt vc4_free_hang_state(dev, vc4->hang_state);
1319d5b1a78aSEric Anholt }
1320b9f19259SBoris Brezillon
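/**
 * vc4_gem_madvise_ioctl() - Marks a BO as purgeable or needed again.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * VC4_MADV_DONTNEED makes an idle BO eligible for reclaim under memory
 * pressure; VC4_MADV_WILLNEED takes it back out of the purgeable pool.
 * args->retained reports whether the BO's contents still exist, so
 * userspace knows whether it must reinitialize them.
 */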
1321b9f19259SBoris Brezillon int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
1322b9f19259SBoris Brezillon struct drm_file *file_priv)
1323b9f19259SBoris Brezillon {
132430f8c74cSMaxime Ripard struct vc4_dev *vc4 = to_vc4_dev(dev);
1325b9f19259SBoris Brezillon struct drm_vc4_gem_madvise *args = data;
1326b9f19259SBoris Brezillon struct drm_gem_object *gem_obj;
1327b9f19259SBoris Brezillon struct vc4_bo *bo;
1328b9f19259SBoris Brezillon int ret;
1329b9f19259SBoris Brezillon
133030f8c74cSMaxime Ripard if (WARN_ON_ONCE(vc4->is_vc5))
133130f8c74cSMaxime Ripard return -ENODEV;
133230f8c74cSMaxime Ripard
1333b9f19259SBoris Brezillon switch (args->madv) {
1334b9f19259SBoris Brezillon case VC4_MADV_DONTNEED:
1335b9f19259SBoris Brezillon case VC4_MADV_WILLNEED:
1336b9f19259SBoris Brezillon break;
1337b9f19259SBoris Brezillon default:
1338b9f19259SBoris Brezillon return -EINVAL;
1339b9f19259SBoris Brezillon }
1340b9f19259SBoris Brezillon
1341b9f19259SBoris Brezillon if (args->pad != 0)
1342b9f19259SBoris Brezillon return -EINVAL;
1343b9f19259SBoris Brezillon
1344b9f19259SBoris Brezillon gem_obj = drm_gem_object_lookup(file_priv, args->handle);
1345b9f19259SBoris Brezillon if (!gem_obj) {
1346b9f19259SBoris Brezillon DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
1347b9f19259SBoris Brezillon return -ENOENT;
1348b9f19259SBoris Brezillon }
1349b9f19259SBoris Brezillon
1350b9f19259SBoris Brezillon bo = to_vc4_bo(gem_obj);
1351b9f19259SBoris Brezillon
1352b9f19259SBoris Brezillon /* Only BOs exposed to userspace can be purged. */
1353b9f19259SBoris Brezillon if (bo->madv == __VC4_MADV_NOTSUPP) {
1354b9f19259SBoris Brezillon DRM_DEBUG("madvise not supported on this BO\n");
1355b9f19259SBoris Brezillon ret = -EINVAL;
1356b9f19259SBoris Brezillon goto out_put_gem;
1357b9f19259SBoris Brezillon }
1358b9f19259SBoris Brezillon
1359b9f19259SBoris Brezillon /* Not sure it's safe to purge imported BOs. Let's just assume it's
1360b9f19259SBoris Brezillon * not until proven otherwise.
1361b9f19259SBoris Brezillon */
1362b9f19259SBoris Brezillon if (gem_obj->import_attach) {
1363b9f19259SBoris Brezillon DRM_DEBUG("madvise not supported on imported BOs\n");
1364b9f19259SBoris Brezillon ret = -EINVAL;
1365b9f19259SBoris Brezillon goto out_put_gem;
1366b9f19259SBoris Brezillon }
1367b9f19259SBoris Brezillon
1368b9f19259SBoris Brezillon mutex_lock(&bo->madv_lock);
1369b9f19259SBoris Brezillon
1370b9f19259SBoris Brezillon if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
1371b9f19259SBoris Brezillon !refcount_read(&bo->usecnt)) {
1372b9f19259SBoris Brezillon /* If the BO is about to be marked as purgeable, is not used
1373b9f19259SBoris Brezillon * and is not already purgeable or purged, add it to the
1374b9f19259SBoris Brezillon * purgeable list.
1375b9f19259SBoris Brezillon */
1376b9f19259SBoris Brezillon vc4_bo_add_to_purgeable_pool(bo);
1377b9f19259SBoris Brezillon } else if (args->madv == VC4_MADV_WILLNEED &&
1378b9f19259SBoris Brezillon bo->madv == VC4_MADV_DONTNEED &&
1379b9f19259SBoris Brezillon !refcount_read(&bo->usecnt)) {
1380b9f19259SBoris Brezillon /* The BO has not been purged yet, just remove it from
1381b9f19259SBoris Brezillon * the purgeable list.
1382b9f19259SBoris Brezillon */
1383b9f19259SBoris Brezillon vc4_bo_remove_from_purgeable_pool(bo);
1384b9f19259SBoris Brezillon }
1385b9f19259SBoris Brezillon
1386b9f19259SBoris Brezillon /* Report back whether the BO's storage survived (was not purged). */
1387b9f19259SBoris Brezillon args->retained = bo->madv != __VC4_MADV_PURGED;
1388b9f19259SBoris Brezillon
1389b9f19259SBoris Brezillon /* Update internal madv state only if the bo was not purged. */
1390b9f19259SBoris Brezillon if (bo->madv != __VC4_MADV_PURGED)
1391b9f19259SBoris Brezillon bo->madv = args->madv;
1392b9f19259SBoris Brezillon
1393b9f19259SBoris Brezillon mutex_unlock(&bo->madv_lock);
1394b9f19259SBoris Brezillon
1395b9f19259SBoris Brezillon ret = 0;
1396b9f19259SBoris Brezillon
1397b9f19259SBoris Brezillon out_put_gem:
1398f7a8cd30SEmil Velikov drm_gem_object_put(gem_obj);
1399b9f19259SBoris Brezillon
1400b9f19259SBoris Brezillon return ret;
1401b9f19259SBoris Brezillon }