xref: /openbmc/linux/drivers/gpu/drm/v3d/v3d_gem.c (revision ba61bb17)
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

#include <drm/drmP.h>
#include <drm/drm_syncobj.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>

#include "uapi/drm/v3d_drm.h"
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

static void
v3d_init_core(struct v3d_dev *v3d, int core)
{
	/* Set OVRTMUOUT, which means that the texture sampler uniform
	 * configuration's tmu output type field is used, instead of
	 * using the hardware default behavior based on the texture
	 * type.  If you want the default behavior, you can still put
	 * "2" in the indirect texture state's output_type field.
	 */
	V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);

	/* Whenever we flush the L2T cache, we always want to flush
	 * the whole thing.
	 */
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0);
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0);
}

/* Sets invariant state for the HW. */
static void
v3d_init_hw_state(struct v3d_dev *v3d)
{
	v3d_init_core(v3d, 0);
}

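/* Asks the GMP to stop accepting new AXI transactions and waits for
 * its outstanding reads, writes, and config accesses to drain, so the
 * core can be powered down safely.
 */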
static void
v3d_idle_axi(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ);

	if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) &
		      (V3D_GMP_STATUS_RD_COUNT_MASK |
		       V3D_GMP_STATUS_WR_COUNT_MASK |
		       V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) {
		DRM_ERROR("Failed to wait for safe GMP shutdown\n");
	}
}

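/* Asks the GCA (only present on V3D before 4.1) to shut down safely
 * and waits for it to acknowledge.
 */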
static void
v3d_idle_gca(struct v3d_dev *v3d)
{
	if (v3d->ver >= 41)
		return;

	V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);

	if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) &
		      V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) ==
		     V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) {
		DRM_ERROR("Failed to wait for safe GCA shutdown\n");
	}
}

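/* Resets the V3D block by pulsing the GR bridge's SW_INIT line, using
 * the register layout for bridge major revision 2 or 7, then restores
 * the invariant HW state.
 */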
static void
v3d_reset_v3d(struct v3d_dev *v3d)
{
	int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);

	if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) {
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0,
				 V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0);

		/* GFXH-1383: The SW_INIT may cause a stray write to address 0
		 * of the unit, so reset it to its power-on value here.
		 */
		V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK);
	} else {
		WARN_ON_ONCE(V3D_GET_FIELD(version,
					   V3D_TOP_GR_BRIDGE_MAJOR) != 7);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1,
				 V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
	}

	v3d_init_hw_state(v3d);
}

void
v3d_reset(struct v3d_dev *v3d)
{
	struct drm_device *dev = &v3d->drm;

	DRM_ERROR("Resetting GPU.\n");
	trace_v3d_reset_begin(dev);

	/* XXX: only needed for safe powerdown, not reset. */
	if (false)
		v3d_idle_axi(v3d, 0);

	v3d_idle_gca(v3d);
	v3d_reset_v3d(v3d);

	v3d_mmu_set_page_table(v3d);
	v3d_irq_reset(v3d);

	trace_v3d_reset_end(dev);
}

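/* Flushes the L3 cache through the GCA.  V3D 4.1+ has no GCA, so this
 * is a no-op there.
 */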
static void
v3d_flush_l3(struct v3d_dev *v3d)
{
	if (v3d->ver < 41) {
		u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);

		V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
			      gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);

		if (v3d->ver < 33) {
			V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
				      gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
		}
	}
}

/* Invalidates the (read-only) L2 cache. */
static void
v3d_invalidate_l2(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
		       V3D_L2CACTL_L2CCLR |
		       V3D_L2CACTL_L2CENA);
}

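/* Flushes the TMU (L1T) write combiner and waits for the outstanding
 * writes to land.
 */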
static void
v3d_invalidate_l1td(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
		DRM_ERROR("Timeout waiting for L1T write combiner flush\n");
	}
}

/* Invalidates texture L2 cachelines */
static void
v3d_flush_l2t(struct v3d_dev *v3d, int core)
{
	v3d_invalidate_l1td(v3d, core);

	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
		DRM_ERROR("Timeout waiting for L2T flush\n");
	}
}

/* Invalidates the slice caches.  These are read-only caches. */
static void
v3d_invalidate_slices(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_CTL_SLCACTL,
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

/* Invalidates texture L2 cachelines */
static void
v3d_invalidate_l2t(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core,
		       V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAR, V3D_L2TCACTL_FLM));
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
		DRM_ERROR("Timeout waiting for L2T invalidate\n");
	}
}

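/* Invalidates or flushes each of V3D's caches (L3, L2, slice caches,
 * and the texture L2) so the GPU observes up-to-date buffer contents.
 */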
void
v3d_invalidate_caches(struct v3d_dev *v3d)
{
	v3d_flush_l3(v3d);

	v3d_invalidate_l2(v3d, 0);
	v3d_invalidate_slices(v3d, 0);
	v3d_flush_l2t(v3d, 0);
}

void
v3d_flush_caches(struct v3d_dev *v3d)
{
	v3d_invalidate_l1td(v3d, 0);
	v3d_invalidate_l2t(v3d, 0);
}

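/* Attaches the render job's completion fence to each BO's reservation
 * object as the exclusive fence, so other users (and
 * v3d_wait_bo_ioctl()) wait for the job to finish before touching the
 * BOs.
 */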
static void
v3d_attach_object_fences(struct v3d_exec_info *exec)
{
	struct dma_fence *out_fence = &exec->render.base.s_fence->finished;
	struct v3d_bo *bo;
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_v3d_bo(&exec->bo[i]->base);

		/* XXX: Use shared fences for read-only objects. */
		reservation_object_add_excl_fence(bo->resv, out_fence);
	}
}

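/* Drops the reservation ww_mutexes taken in v3d_lock_bo_reservations()
 * and ends the acquire context.
 */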
static void
v3d_unlock_bo_reservations(struct drm_device *dev,
			   struct v3d_exec_info *exec,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		struct v3d_bo *bo = to_v3d_bo(&exec->bo[i]->base);

		ww_mutex_unlock(&bo->resv->lock);
	}

	ww_acquire_fini(acquire_ctx);
}

/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list).  They're entirely private
 * to v3d, so we don't attach dma-buf fences to them.
 */
static int
v3d_lock_bo_reservations(struct drm_device *dev,
			 struct v3d_exec_info *exec,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int contended_lock = -1;
	int i, ret;
	struct v3d_bo *bo;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended_lock != -1) {
		bo = to_v3d_bo(&exec->bo[contended_lock]->base);
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < exec->bo_count; i++) {
		if (i == contended_lock)
			continue;

		bo = to_v3d_bo(&exec->bo[i]->base);

		ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++) {
				bo = to_v3d_bo(&exec->bo[j]->base);
				ww_mutex_unlock(&bo->resv->lock);
			}

			if (contended_lock != -1 && contended_lock >= i) {
				bo = to_v3d_bo(&exec->bo[contended_lock]->base);

				ww_mutex_unlock(&bo->resv->lock);
			}

			if (ret == -EDEADLK) {
				contended_lock = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	for (i = 0; i < exec->bo_count; i++) {
		bo = to_v3d_bo(&exec->bo[i]->base);

		ret = reservation_object_reserve_shared(bo->resv);
		if (ret) {
			v3d_unlock_bo_reservations(dev, exec, acquire_ctx);
			return ret;
		}
	}

	return 0;
}

/**
 * v3d_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL argument with the submitted BO handle list
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at v3d_exec_cleanup() time.
 */
static int
v3d_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct drm_v3d_submit_cl *args,
		  struct v3d_exec_info *exec)
{
	u32 *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs\n");
		return -EINVAL;
	}

	exec->bo = kvmalloc_array(exec->bo_count,
				  sizeof(struct drm_gem_cma_object *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!exec->bo) {
		DRM_DEBUG("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(exec->bo_count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_DEBUG("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)args->bo_handles,
			   exec->bo_count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -ENOENT;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_get(bo);
		exec->bo[i] = to_v3d_bo(bo);
	}
	spin_unlock(&file_priv->table_lock);

fail:
	kvfree(handles);
	return ret;
}

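/* Frees the exec once its last reference is dropped: releases the
 * fences and BO references it holds, frees the private BOs on
 * unref_list, and drops the runtime PM reference taken at submit time.
 */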
static void
v3d_exec_cleanup(struct kref *ref)
{
	struct v3d_exec_info *exec = container_of(ref, struct v3d_exec_info,
						  refcount);
	struct v3d_dev *v3d = exec->v3d;
	unsigned int i;
	struct v3d_bo *bo, *save;

	dma_fence_put(exec->bin.in_fence);
	dma_fence_put(exec->render.in_fence);

	dma_fence_put(exec->bin.done_fence);
	dma_fence_put(exec->render.done_fence);

	dma_fence_put(exec->bin_done_fence);

	for (i = 0; i < exec->bo_count; i++)
		drm_gem_object_put_unlocked(&exec->bo[i]->base);
	kvfree(exec->bo);

	list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) {
		drm_gem_object_put_unlocked(&bo->base);
	}

	pm_runtime_mark_last_busy(v3d->dev);
	pm_runtime_put_autosuspend(v3d->dev);

	kfree(exec);
}

void v3d_exec_put(struct v3d_exec_info *exec)
{
	kref_put(&exec->refcount, v3d_exec_cleanup);
}

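/* ioctl to block (with a timeout) until all fences attached to a BO's
 * reservation object have signaled, i.e. until the GPU is done with
 * the BO.
 */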
int
v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_v3d_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct v3d_bo *bo;
	ktime_t start = ktime_get();
	u64 delta_ns;
	unsigned long timeout_jiffies =
		nsecs_to_jiffies_timeout(args->timeout_ns);

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_v3d_bo(gem_obj);

	ret = reservation_object_wait_timeout_rcu(bo->resv,
						  true, true,
						  timeout_jiffies);

	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	/* Decrement the user's timeout, in case we got interrupted
	 * such that the ioctl will be restarted.
	 */
	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
	if (delta_ns < args->timeout_ns)
		args->timeout_ns -= delta_ns;
	else
		args->timeout_ns = 0;

	/* Asked to wait beyond the jiffie/scheduler precision? */
	if (ret == -ETIME && args->timeout_ns)
		ret = -EAGAIN;

	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}

/**
 * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_cl *args = data;
	struct v3d_exec_info *exec;
	struct ww_acquire_ctx acquire_ctx;
	struct drm_syncobj *sync_out;
	int ret = 0;

	if (args->pad != 0) {
		DRM_INFO("pad must be zero: %d\n", args->pad);
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec)
		return -ENOMEM;

	ret = pm_runtime_get_sync(v3d->dev);
	if (ret < 0) {
		kfree(exec);
		return ret;
	}

	kref_init(&exec->refcount);

	ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl,
				     &exec->bin.in_fence);
	if (ret == -EINVAL)
		goto fail;

	ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl,
				     &exec->render.in_fence);
	if (ret == -EINVAL)
		goto fail;

	exec->qma = args->qma;
	exec->qms = args->qms;
	exec->qts = args->qts;
	exec->bin.exec = exec;
	exec->bin.start = args->bcl_start;
	exec->bin.end = args->bcl_end;
	exec->render.exec = exec;
	exec->render.start = args->rcl_start;
	exec->render.end = args->rcl_end;
	exec->v3d = v3d;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = v3d_cl_lookup_bos(dev, file_priv, args, exec);
	if (ret)
		goto fail;

	ret = v3d_lock_bo_reservations(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

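	/* Taking sched_lock across creating and pushing both jobs keeps
	 * the scheduler fence seqnos in submission order.
	 */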
	mutex_lock(&v3d->sched_lock);
	if (exec->bin.start != exec->bin.end) {
		ret = drm_sched_job_init(&exec->bin.base,
					 &v3d->queue[V3D_BIN].sched,
					 &v3d_priv->sched_entity[V3D_BIN],
					 v3d_priv);
		if (ret)
			goto fail_unreserve;

		exec->bin_done_fence =
			dma_fence_get(&exec->bin.base.s_fence->finished);

		kref_get(&exec->refcount); /* put by scheduler job completion */
		drm_sched_entity_push_job(&exec->bin.base,
					  &v3d_priv->sched_entity[V3D_BIN]);
	}

	ret = drm_sched_job_init(&exec->render.base,
				 &v3d->queue[V3D_RENDER].sched,
				 &v3d_priv->sched_entity[V3D_RENDER],
				 v3d_priv);
	if (ret)
		goto fail_unreserve;

	kref_get(&exec->refcount); /* put by scheduler job completion */
	drm_sched_entity_push_job(&exec->render.base,
				  &v3d_priv->sched_entity[V3D_RENDER]);
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_object_fences(exec);

	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);

	/* Update the return sync object for the job. */
	sync_out = drm_syncobj_find(file_priv, args->out_sync);
	if (sync_out) {
		drm_syncobj_replace_fence(sync_out,
					  &exec->render.base.s_fence->finished);
		drm_syncobj_put(sync_out);
	}

	v3d_exec_put(exec);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
fail:
	v3d_exec_put(exec);

	return ret;
}

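/* Sets up the driver-wide GEM state: per-queue fence contexts, locks,
 * the MMU page table and its address-space allocator, initial HW
 * state, and the GPU schedulers.
 */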
int
v3d_gem_init(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	u32 pt_size = 4096 * 1024;
	int ret, i;

	for (i = 0; i < V3D_MAX_QUEUES; i++)
		v3d->queue[i].fence_context = dma_fence_context_alloc(1);

	spin_lock_init(&v3d->mm_lock);
	spin_lock_init(&v3d->job_lock);
	mutex_init(&v3d->bo_lock);
	mutex_init(&v3d->reset_lock);
	mutex_init(&v3d->sched_lock);

	/* Note: We don't allocate address 0.  Various bits of HW
	 * treat 0 as special, such as the occlusion query counters
	 * where 0 means "disabled".
	 */
	drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);

	v3d->pt = dma_alloc_wc(v3d->dev, pt_size,
			       &v3d->pt_paddr,
			       GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!v3d->pt) {
		drm_mm_takedown(&v3d->mm);
		dev_err(v3d->dev,
			"Failed to allocate page tables. "
			"Please ensure you have CMA enabled.\n");
		return -ENOMEM;
	}

	v3d_init_hw_state(v3d);
	v3d_mmu_set_page_table(v3d);

	ret = v3d_sched_init(v3d);
	if (ret) {
		drm_mm_takedown(&v3d->mm);
		dma_free_coherent(v3d->dev, pt_size, (void *)v3d->pt,
				  v3d->pt_paddr);
		return ret;
	}

	return 0;
}

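/* Tears down the state set up by v3d_gem_init() once no jobs remain. */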
void
v3d_gem_destroy(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);

	v3d_sched_fini(v3d);

	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(v3d->bin_job);
	WARN_ON(v3d->render_job);

	drm_mm_takedown(&v3d->mm);

	dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, v3d->pt_paddr);
}
670