xref: /openbmc/linux/drivers/gpu/drm/v3d/v3d_gem.c (revision c2b3e61a8df2da681ea291ca65c27a13685959bc)
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

#include <drm/drmP.h>
#include <drm/drm_syncobj.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>

#include "uapi/drm/v3d_drm.h"
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

static void
v3d_init_core(struct v3d_dev *v3d, int core)
{
	/* Set OVRTMUOUT, which means that the texture sampler uniform
	 * configuration's tmu output type field is used, instead of
	 * using the hardware default behavior based on the texture
	 * type.  If you want the default behavior, you can still put
	 * "2" in the indirect texture state's output_type field.
	 */
	if (v3d->ver < 40)
		V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);

	/* Whenever we flush the L2T cache, we always want to flush
	 * the whole thing.
	 */
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0);
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0);
}

/* Sets invariant state for the HW. */
static void
v3d_init_hw_state(struct v3d_dev *v3d)
{
	v3d_init_core(v3d, 0);
}

static void
v3d_idle_axi(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ);

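	/* wait_for() (defined in v3d_drv.h) polls the condition until it
	 * is met or the timeout, given in milliseconds, expires.
	 */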
	if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) &
		      (V3D_GMP_STATUS_RD_COUNT_MASK |
		       V3D_GMP_STATUS_WR_COUNT_MASK |
		       V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) {
		DRM_ERROR("Failed to wait for safe GMP shutdown\n");
	}
}

static void
v3d_idle_gca(struct v3d_dev *v3d)
{
	if (v3d->ver >= 41)
		return;

	V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);

	if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) &
		      V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) ==
		     V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) {
		DRM_ERROR("Failed to wait for safe GCA shutdown\n");
	}
}

static void
v3d_reset_by_bridge(struct v3d_dev *v3d)
{
	int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);

	if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) {
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0,
				 V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0);

		/* GFXH-1383: The SW_INIT may cause a stray write to address 0
		 * of the unit, so reset it to its power-on value here.
		 */
		V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK);
	} else {
		WARN_ON_ONCE(V3D_GET_FIELD(version,
					   V3D_TOP_GR_BRIDGE_MAJOR) != 7);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1,
				 V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
	}
}

static void
v3d_reset_v3d(struct v3d_dev *v3d)
{
	if (v3d->reset)
		reset_control_reset(v3d->reset);
	else
		v3d_reset_by_bridge(v3d);

	v3d_init_hw_state(v3d);
}

void
v3d_reset(struct v3d_dev *v3d)
{
	struct drm_device *dev = &v3d->drm;

	DRM_ERROR("Resetting GPU.\n");
	trace_v3d_reset_begin(dev);

	/* XXX: only needed for safe powerdown, not reset. */
	if (false)
		v3d_idle_axi(v3d, 0);

	v3d_idle_gca(v3d);
	v3d_reset_v3d(v3d);

	v3d_mmu_set_page_table(v3d);
	v3d_irq_reset(v3d);

	trace_v3d_reset_end(dev);
}

static void
v3d_flush_l3(struct v3d_dev *v3d)
{
	if (v3d->ver < 41) {
		u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);

		V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
			      gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);

		if (v3d->ver < 33) {
			V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
				      gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
		}
	}
}

/* Invalidates the (read-only) L2C cache.  This was the L2 cache for
 * uniforms and instructions on V3D 3.2.
 */
static void
v3d_invalidate_l2c(struct v3d_dev *v3d, int core)
{
	if (v3d->ver > 32)
		return;

	V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
		       V3D_L2CACTL_L2CCLR |
		       V3D_L2CACTL_L2CENA);
}

/* Invalidates texture L2 cachelines */
static void
v3d_flush_l2t(struct v3d_dev *v3d, int core)
{
	/* While there is a busy bit (V3D_L2TCACTL_L2TFLS), we don't
	 * need to wait for completion before dispatching the job --
	 * L2T accesses will be stalled until the flush has completed.
	 */
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
}

/* Invalidates the slice caches.  These are read-only caches. */
static void
v3d_invalidate_slices(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_CTL_SLCACTL,
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

void
v3d_invalidate_caches(struct v3d_dev *v3d)
{
	/* Invalidate the caches from the outside in.  That way if
	 * another CL's concurrent use of nearby memory were to pull
	 * an invalidated cacheline back in, we wouldn't leave stale
	 * data in the inner cache.
	 */
	v3d_flush_l3(v3d);
	v3d_invalidate_l2c(v3d, 0);
	v3d_flush_l2t(v3d, 0);
	v3d_invalidate_slices(v3d, 0);
}

static void
v3d_attach_object_fences(struct v3d_bo **bos, int bo_count,
			 struct dma_fence *fence)
{
	int i;

	for (i = 0; i < bo_count; i++) {
		/* XXX: Use shared fences for read-only objects. */
		reservation_object_add_excl_fence(bos[i]->base.resv, fence);
	}
}

static void
v3d_unlock_bo_reservations(struct v3d_bo **bos,
			   int bo_count,
			   struct ww_acquire_ctx *acquire_ctx)
{
	drm_gem_unlock_reservations((struct drm_gem_object **)bos, bo_count,
				    acquire_ctx);
}

/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list).  They're entirely private
 * to v3d, so we don't attach dma-buf fences to them.
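 *
 * drm_gem_lock_reservations() takes every BO's reservation ww-mutex under
 * a single ww_acquire_ctx, backing off and retrying on contention, so two
 * submitters locking overlapping sets of BOs can't deadlock each other.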
 */
static int
v3d_lock_bo_reservations(struct v3d_bo **bos,
			 int bo_count,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int i, ret;

	ret = drm_gem_lock_reservations((struct drm_gem_object **)bos,
					bo_count, acquire_ctx);
	if (ret)
		return ret;

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	for (i = 0; i < bo_count; i++) {
		ret = reservation_object_reserve_shared(bos[i]->base.resv, 1);
		if (ret) {
			v3d_unlock_bo_reservations(bos, bo_count,
						   acquire_ctx);
			return ret;
		}
	}

	return 0;
}

/**
 * v3d_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: ioctl argument
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at v3d_exec_cleanup() time.
 */
static int
v3d_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct drm_v3d_submit_cl *args,
		  struct v3d_exec_info *exec)
{
	u32 *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs\n");
		return -EINVAL;
	}

	exec->bo = kvmalloc_array(exec->bo_count,
				  sizeof(struct drm_gem_cma_object *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!exec->bo) {
		DRM_DEBUG("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(exec->bo_count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_DEBUG("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)args->bo_handles,
			   exec->bo_count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -ENOENT;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_get(bo);
		exec->bo[i] = to_v3d_bo(bo);
	}
	spin_unlock(&file_priv->table_lock);

fail:
	kvfree(handles);
	return ret;
}

static void
v3d_exec_cleanup(struct kref *ref)
{
	struct v3d_exec_info *exec = container_of(ref, struct v3d_exec_info,
						  refcount);
	struct v3d_dev *v3d = exec->v3d;
	unsigned int i;
	struct v3d_bo *bo, *save;

	dma_fence_put(exec->bin.in_fence);
	dma_fence_put(exec->render.in_fence);

	dma_fence_put(exec->bin.done_fence);
	dma_fence_put(exec->render.done_fence);

	dma_fence_put(exec->bin_done_fence);
	dma_fence_put(exec->render_done_fence);

	for (i = 0; i < exec->bo_count; i++)
		drm_gem_object_put_unlocked(&exec->bo[i]->base);
	kvfree(exec->bo);

	list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) {
		drm_gem_object_put_unlocked(&bo->base);
	}

	pm_runtime_mark_last_busy(v3d->dev);
	pm_runtime_put_autosuspend(v3d->dev);

	kfree(exec);
}

void v3d_exec_put(struct v3d_exec_info *exec)
{
	kref_put(&exec->refcount, v3d_exec_cleanup);
}

static void
v3d_tfu_job_cleanup(struct kref *ref)
{
	struct v3d_tfu_job *job = container_of(ref, struct v3d_tfu_job,
					       refcount);
	struct v3d_dev *v3d = job->v3d;
	unsigned int i;

	dma_fence_put(job->in_fence);
	dma_fence_put(job->done_fence);

	for (i = 0; i < ARRAY_SIZE(job->bo); i++) {
		if (job->bo[i])
			drm_gem_object_put_unlocked(&job->bo[i]->base);
	}

	pm_runtime_mark_last_busy(v3d->dev);
	pm_runtime_put_autosuspend(v3d->dev);

	kfree(job);
}

void v3d_tfu_job_put(struct v3d_tfu_job *job)
{
	kref_put(&job->refcount, v3d_tfu_job_cleanup);
}

int
v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_v3d_wait_bo *args = data;
	ktime_t start = ktime_get();
	u64 delta_ns;
	unsigned long timeout_jiffies =
		nsecs_to_jiffies_timeout(args->timeout_ns);

	if (args->pad != 0)
		return -EINVAL;

	ret = drm_gem_reservation_object_wait(file_priv, args->handle,
					      true, timeout_jiffies);

	/* Decrement the user's timeout, in case we got interrupted
	 * such that the ioctl will be restarted.
	 */
	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
	if (delta_ns < args->timeout_ns)
		args->timeout_ns -= delta_ns;
	else
		args->timeout_ns = 0;

	/* Asked to wait beyond the jiffie/scheduler precision? */
	if (ret == -ETIME && args->timeout_ns)
		ret = -EAGAIN;

	return ret;
}
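
/* For illustration only (not driver code): a rough sketch of how a
 * userspace caller might use the restartable timeout above.  The fd and
 * bo_handle values are hypothetical; the point is the retry pattern
 * around DRM_IOCTL_V3D_WAIT_BO.
 *
 *	struct drm_v3d_wait_bo wait = {
 *		.handle = bo_handle,
 *		.pad = 0,
 *		.timeout_ns = 1000000000ull,	// 1 second total budget
 *	};
 *	int ret;
 *
 *	do {
 *		ret = ioctl(fd, DRM_IOCTL_V3D_WAIT_BO, &wait);
 *	} while (ret == -1 && (errno == EINTR || errno == EAGAIN) &&
 *		 wait.timeout_ns);
 *
 * Because the remaining budget is written back into timeout_ns before
 * returning, a simple retry loop like this still honors the caller's
 * original overall timeout.
 */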

/**
 * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
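 *
 * When the binner command list range is non-empty, a bin job is set up
 * and pushed to the V3D_BIN scheduler entity before the render job is
 * pushed to V3D_RENDER.  The render job's completion fence is what gets
 * attached to the referenced BOs and installed in the out_sync syncobj,
 * so that is the fence to wait on for the whole frame.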
 */
int
v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_cl *args = data;
	struct v3d_exec_info *exec;
	struct ww_acquire_ctx acquire_ctx;
	struct drm_syncobj *sync_out;
	int ret = 0;

	trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);

	if (args->pad != 0) {
		DRM_INFO("pad must be zero: %d\n", args->pad);
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec)
		return -ENOMEM;

	ret = pm_runtime_get_sync(v3d->dev);
	if (ret < 0) {
		kfree(exec);
		return ret;
	}

	kref_init(&exec->refcount);

	ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl,
				     0, 0, &exec->bin.in_fence);
	if (ret == -EINVAL)
		goto fail;

	ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl,
				     0, 0, &exec->render.in_fence);
	if (ret == -EINVAL)
		goto fail;

	exec->qma = args->qma;
	exec->qms = args->qms;
	exec->qts = args->qts;
	exec->bin.exec = exec;
	exec->bin.start = args->bcl_start;
	exec->bin.end = args->bcl_end;
	exec->render.exec = exec;
	exec->render.start = args->rcl_start;
	exec->render.end = args->rcl_end;
	exec->v3d = v3d;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = v3d_cl_lookup_bos(dev, file_priv, args, exec);
	if (ret)
		goto fail;

	ret = v3d_lock_bo_reservations(exec->bo, exec->bo_count,
				       &acquire_ctx);
	if (ret)
		goto fail;

	mutex_lock(&v3d->sched_lock);
	if (exec->bin.start != exec->bin.end) {
		ret = drm_sched_job_init(&exec->bin.base,
					 &v3d_priv->sched_entity[V3D_BIN],
					 v3d_priv);
		if (ret)
			goto fail_unreserve;

		exec->bin_done_fence =
			dma_fence_get(&exec->bin.base.s_fence->finished);

		kref_get(&exec->refcount); /* put by scheduler job completion */
		drm_sched_entity_push_job(&exec->bin.base,
					  &v3d_priv->sched_entity[V3D_BIN]);
	}

	ret = drm_sched_job_init(&exec->render.base,
				 &v3d_priv->sched_entity[V3D_RENDER],
				 v3d_priv);
	if (ret)
		goto fail_unreserve;

	exec->render_done_fence =
		dma_fence_get(&exec->render.base.s_fence->finished);

	kref_get(&exec->refcount); /* put by scheduler job completion */
	drm_sched_entity_push_job(&exec->render.base,
				  &v3d_priv->sched_entity[V3D_RENDER]);
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_object_fences(exec->bo, exec->bo_count,
				 exec->render_done_fence);

	v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx);

	/* Update the return sync object for the job's done fence. */
	sync_out = drm_syncobj_find(file_priv, args->out_sync);
	if (sync_out) {
		drm_syncobj_replace_fence(sync_out, exec->render_done_fence);
		drm_syncobj_put(sync_out);
	}

	v3d_exec_put(exec);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
	v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx);
fail:
	v3d_exec_put(exec);

	return ret;
}
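
/* Purely as an illustration of the ABI above (not driver code): a
 * hypothetical userspace submission could look roughly like this, with
 * fd, the BO handles, and the command-list/tile-memory addresses
 * standing in for whatever the real caller has built.
 *
 *	__u32 handles[] = { bcl_bo, rcl_bo, fb_bo };
 *	struct drm_v3d_submit_cl submit = {
 *		.bcl_start = bcl_start, .bcl_end = bcl_end,
 *		.rcl_start = rcl_start, .rcl_end = rcl_end,
 *		.qma = tile_alloc_addr, .qms = tile_alloc_size,
 *		.qts = tile_state_addr,
 *		.bo_handles = (uintptr_t)handles,
 *		.bo_handle_count = 3,
 *		.in_sync_bcl = 0, .in_sync_rcl = 0,
 *		.out_sync = out_syncobj,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_V3D_SUBMIT_CL, &submit) == 0)
 *		wait on out_syncobj, or DRM_IOCTL_V3D_WAIT_BO on fb_bo
 */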
55457692c94SEric Anholt 
5551584f16cSEric Anholt /**
5561584f16cSEric Anholt  * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
5571584f16cSEric Anholt  * @dev: DRM device
5581584f16cSEric Anholt  * @data: ioctl argument
5591584f16cSEric Anholt  * @file_priv: DRM file for this fd
5601584f16cSEric Anholt  *
5611584f16cSEric Anholt  * Userspace provides the register setup for the TFU, which we don't
5621584f16cSEric Anholt  * need to validate since the TFU is behind the MMU.
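 *
 * The fixed-size bo_handles[] array in the ioctl argument is scanned in
 * order, and the first zero handle terminates the list, so a job that
 * uses fewer BOs than the array holds just leaves the tail zeroed.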
 */
int
v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_tfu *args = data;
	struct v3d_tfu_job *job;
	struct ww_acquire_ctx acquire_ctx;
	struct drm_syncobj *sync_out;
	struct dma_fence *sched_done_fence;
	int ret = 0;
	int bo_count;

	trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);

	job = kcalloc(1, sizeof(*job), GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	ret = pm_runtime_get_sync(v3d->dev);
	if (ret < 0) {
		kfree(job);
		return ret;
	}

	kref_init(&job->refcount);

	ret = drm_syncobj_find_fence(file_priv, args->in_sync,
				     0, 0, &job->in_fence);
	if (ret == -EINVAL)
		goto fail;

	job->args = *args;
	job->v3d = v3d;

	spin_lock(&file_priv->table_lock);
	for (bo_count = 0; bo_count < ARRAY_SIZE(job->bo); bo_count++) {
		struct drm_gem_object *bo;

		if (!args->bo_handles[bo_count])
			break;

		bo = idr_find(&file_priv->object_idr,
			      args->bo_handles[bo_count]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  bo_count, args->bo_handles[bo_count]);
			ret = -ENOENT;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_get(bo);
		job->bo[bo_count] = to_v3d_bo(bo);
	}
	spin_unlock(&file_priv->table_lock);

	ret = v3d_lock_bo_reservations(job->bo, bo_count, &acquire_ctx);
	if (ret)
		goto fail;

	mutex_lock(&v3d->sched_lock);
	ret = drm_sched_job_init(&job->base,
				 &v3d_priv->sched_entity[V3D_TFU],
				 v3d_priv);
	if (ret)
		goto fail_unreserve;

	sched_done_fence = dma_fence_get(&job->base.s_fence->finished);

	kref_get(&job->refcount); /* put by scheduler job completion */
	drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[V3D_TFU]);
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_object_fences(job->bo, bo_count, sched_done_fence);

	v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx);

	/* Update the return sync object */
	sync_out = drm_syncobj_find(file_priv, args->out_sync);
	if (sync_out) {
		drm_syncobj_replace_fence(sync_out, sched_done_fence);
		drm_syncobj_put(sync_out);
	}
	dma_fence_put(sched_done_fence);

	v3d_tfu_job_put(job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
	v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx);
fail:
	v3d_tfu_job_put(job);

	return ret;
}

int
v3d_gem_init(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	u32 pt_size = 4096 * 1024;
	int ret, i;

	for (i = 0; i < V3D_MAX_QUEUES; i++)
		v3d->queue[i].fence_context = dma_fence_context_alloc(1);

	spin_lock_init(&v3d->mm_lock);
	spin_lock_init(&v3d->job_lock);
	mutex_init(&v3d->bo_lock);
	mutex_init(&v3d->reset_lock);
	mutex_init(&v3d->sched_lock);

	/* Note: We don't allocate address 0.  Various bits of HW
	 * treat 0 as special, such as the occlusion query counters
	 * where 0 means "disabled".
	 */
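
	/* The 4MB page table holds 4-byte entries, one per 4KB page, so
	 * the drm_mm below hands out page numbers covering a 4GB MMU
	 * address space (minus the reserved page 0).
	 */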
	drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);

	v3d->pt = dma_alloc_wc(v3d->dev, pt_size,
			       &v3d->pt_paddr,
			       GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!v3d->pt) {
		drm_mm_takedown(&v3d->mm);
		dev_err(v3d->dev,
			"Failed to allocate page tables. "
			"Please ensure you have CMA enabled.\n");
		return -ENOMEM;
	}

	v3d_init_hw_state(v3d);
	v3d_mmu_set_page_table(v3d);

	ret = v3d_sched_init(v3d);
	if (ret) {
		drm_mm_takedown(&v3d->mm);
		dma_free_coherent(v3d->dev, pt_size, (void *)v3d->pt,
				  v3d->pt_paddr);
		return ret;
	}

	return 0;
}

void
v3d_gem_destroy(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);

	v3d_sched_fini(v3d);

	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(v3d->bin_job);
	WARN_ON(v3d->render_job);

	drm_mm_takedown(&v3d->mm);

	dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, v3d->pt_paddr);
}