xref: /openbmc/linux/drivers/gpu/drm/vc4/vc4_gem.c (revision a32cc817)
1 /*
2  * Copyright © 2014 Broadcom
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <linux/module.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/device.h>
28 #include <linux/io.h>
29 #include <linux/sched/signal.h>
30 #include <linux/dma-fence-array.h>
31 
32 #include <drm/drm_syncobj.h>
33 
34 #include "uapi/drm/vc4_drm.h"
35 #include "vc4_drv.h"
36 #include "vc4_regs.h"
37 #include "vc4_trace.h"
38 
39 static void
40 vc4_queue_hangcheck(struct drm_device *dev)
41 {
42 	struct vc4_dev *vc4 = to_vc4_dev(dev);
43 
44 	mod_timer(&vc4->hangcheck.timer,
45 		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
46 }
47 
48 struct vc4_hang_state {
49 	struct drm_vc4_get_hang_state user_state;
50 
51 	u32 bo_count;
52 	struct drm_gem_object **bo;
53 };
54 
55 static void
56 vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
57 {
58 	unsigned int i;
59 
60 	for (i = 0; i < state->user_state.bo_count; i++)
61 		drm_gem_object_put(state->bo[i]);
62 
63 	kfree(state);
64 }
65 
66 int
67 vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
68 			 struct drm_file *file_priv)
69 {
70 	struct drm_vc4_get_hang_state *get_state = data;
71 	struct drm_vc4_get_hang_state_bo *bo_state;
72 	struct vc4_hang_state *kernel_state;
73 	struct drm_vc4_get_hang_state *state;
74 	struct vc4_dev *vc4 = to_vc4_dev(dev);
75 	unsigned long irqflags;
76 	u32 i;
77 	int ret = 0;
78 
79 	if (!vc4->v3d) {
80 		DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n");
81 		return -ENODEV;
82 	}
83 
84 	spin_lock_irqsave(&vc4->job_lock, irqflags);
85 	kernel_state = vc4->hang_state;
86 	if (!kernel_state) {
87 		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
88 		return -ENOENT;
89 	}
90 	state = &kernel_state->user_state;
91 
92 	/* If the user's array isn't big enough, just return the
93 	 * required array size.
94 	 */
95 	if (get_state->bo_count < state->bo_count) {
96 		get_state->bo_count = state->bo_count;
97 		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
98 		return 0;
99 	}
100 
101 	vc4->hang_state = NULL;
102 	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
103 
104 	/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
105 	state->bo = get_state->bo;
106 	memcpy(get_state, state, sizeof(*state));
107 
108 	bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
109 	if (!bo_state) {
110 		ret = -ENOMEM;
111 		goto err_free;
112 	}
113 
114 	for (i = 0; i < state->bo_count; i++) {
115 		struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
116 		u32 handle;
117 
118 		ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
119 					    &handle);
120 
121 		if (ret) {
122 			state->bo_count = i;
123 			goto err_delete_handle;
124 		}
125 		bo_state[i].handle = handle;
126 		bo_state[i].paddr = vc4_bo->base.paddr;
127 		bo_state[i].size = vc4_bo->base.base.size;
128 	}
129 
130 	if (copy_to_user(u64_to_user_ptr(get_state->bo),
131 			 bo_state,
132 			 state->bo_count * sizeof(*bo_state)))
133 		ret = -EFAULT;
134 
135 err_delete_handle:
136 	if (ret) {
137 		for (i = 0; i < state->bo_count; i++)
138 			drm_gem_handle_delete(file_priv, bo_state[i].handle);
139 	}
140 
141 err_free:
142 	vc4_free_hang_state(dev, kernel_state);
143 	kfree(bo_state);
144 
145 	return ret;
146 }
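/* A minimal userspace sketch of the two-call pattern this ioctl expects,
 * assuming the DRM_IOCTL_VC4_GET_HANG_STATE request and the struct layout
 * from uapi/drm/vc4_drm.h (error handling elided):
 *
 *	struct drm_vc4_get_hang_state get = { 0 };
 *	struct drm_vc4_get_hang_state_bo *bos;
 *
 *	// First call: with bo_count too small, only the required size is
 *	// reported back (or -ENOENT if no hang has been recorded yet).
 *	ioctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get);
 *
 *	bos = calloc(get.bo_count, sizeof(*bos));
 *	get.bo = (uintptr_t)bos;
 *
 *	// Second call: consumes the saved state, copies out the V3D
 *	// registers, and creates one GEM handle per captured BO.
 *	ioctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get);
 */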
147 
148 static void
149 vc4_save_hang_state(struct drm_device *dev)
150 {
151 	struct vc4_dev *vc4 = to_vc4_dev(dev);
152 	struct drm_vc4_get_hang_state *state;
153 	struct vc4_hang_state *kernel_state;
154 	struct vc4_exec_info *exec[2];
155 	struct vc4_bo *bo;
156 	unsigned long irqflags;
157 	unsigned int i, j, k, unref_list_count;
158 
159 	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
160 	if (!kernel_state)
161 		return;
162 
163 	state = &kernel_state->user_state;
164 
165 	spin_lock_irqsave(&vc4->job_lock, irqflags);
166 	exec[0] = vc4_first_bin_job(vc4);
167 	exec[1] = vc4_first_render_job(vc4);
168 	if (!exec[0] && !exec[1]) {
169 		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
170 		return;
171 	}
172 
173 	/* Get the BOs from both binner and renderer into hang state. */
174 	state->bo_count = 0;
175 	for (i = 0; i < 2; i++) {
176 		if (!exec[i])
177 			continue;
178 
179 		unref_list_count = 0;
180 		list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
181 			unref_list_count++;
182 		state->bo_count += exec[i]->bo_count + unref_list_count;
183 	}
184 
185 	kernel_state->bo = kcalloc(state->bo_count,
186 				   sizeof(*kernel_state->bo), GFP_ATOMIC);
187 
188 	if (!kernel_state->bo) {
189 		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
190 		return;
191 	}
192 
193 	k = 0;
194 	for (i = 0; i < 2; i++) {
195 		if (!exec[i])
196 			continue;
197 
198 		for (j = 0; j < exec[i]->bo_count; j++) {
199 			bo = to_vc4_bo(&exec[i]->bo[j]->base);
200 
201 			/* Retain BOs just in case they were marked purgeable.
202 			 * This prevents the BO from being purged before
203 			 * someone had a chance to dump the hang state.
204 			 */
205 			WARN_ON(!refcount_read(&bo->usecnt));
206 			refcount_inc(&bo->usecnt);
207 			drm_gem_object_get(&exec[i]->bo[j]->base);
208 			kernel_state->bo[k++] = &exec[i]->bo[j]->base;
209 		}
210 
211 		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
212 			/* No need to retain BOs coming from the ->unref_list
213 			 * because they are naturally unpurgeable.
214 			 */
215 			drm_gem_object_get(&bo->base.base);
216 			kernel_state->bo[k++] = &bo->base.base;
217 		}
218 	}
219 
220 	WARN_ON_ONCE(k != state->bo_count);
221 
222 	if (exec[0])
223 		state->start_bin = exec[0]->ct0ca;
224 	if (exec[1])
225 		state->start_render = exec[1]->ct1ca;
226 
227 	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
228 
229 	state->ct0ca = V3D_READ(V3D_CTNCA(0));
230 	state->ct0ea = V3D_READ(V3D_CTNEA(0));
231 
232 	state->ct1ca = V3D_READ(V3D_CTNCA(1));
233 	state->ct1ea = V3D_READ(V3D_CTNEA(1));
234 
235 	state->ct0cs = V3D_READ(V3D_CTNCS(0));
236 	state->ct1cs = V3D_READ(V3D_CTNCS(1));
237 
238 	state->ct0ra0 = V3D_READ(V3D_CT00RA0);
239 	state->ct1ra0 = V3D_READ(V3D_CT01RA0);
240 
241 	state->bpca = V3D_READ(V3D_BPCA);
242 	state->bpcs = V3D_READ(V3D_BPCS);
243 	state->bpoa = V3D_READ(V3D_BPOA);
244 	state->bpos = V3D_READ(V3D_BPOS);
245 
246 	state->vpmbase = V3D_READ(V3D_VPMBASE);
247 
248 	state->dbge = V3D_READ(V3D_DBGE);
249 	state->fdbgo = V3D_READ(V3D_FDBGO);
250 	state->fdbgb = V3D_READ(V3D_FDBGB);
251 	state->fdbgr = V3D_READ(V3D_FDBGR);
252 	state->fdbgs = V3D_READ(V3D_FDBGS);
253 	state->errstat = V3D_READ(V3D_ERRSTAT);
254 
255 	/* We need to turn purgeable BOs into unpurgeable ones so that
256 	 * userspace has a chance to dump the hang state before the kernel
257 	 * decides to purge those BOs.
258 	 * Note that BO consistency at dump time cannot be guaranteed. For
259 	 * example, if the owner of these BOs decides to re-use them or mark
260 	 * them purgeable again there's nothing we can do to prevent it.
261 	 */
262 	for (i = 0; i < kernel_state->user_state.bo_count; i++) {
263 		struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);
264 
265 		if (bo->madv == __VC4_MADV_NOTSUPP)
266 			continue;
267 
268 		mutex_lock(&bo->madv_lock);
269 		if (!WARN_ON(bo->madv == __VC4_MADV_PURGED))
270 			bo->madv = VC4_MADV_WILLNEED;
271 		refcount_dec(&bo->usecnt);
272 		mutex_unlock(&bo->madv_lock);
273 	}
274 
275 	spin_lock_irqsave(&vc4->job_lock, irqflags);
276 	if (vc4->hang_state) {
277 		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
278 		vc4_free_hang_state(dev, kernel_state);
279 	} else {
280 		vc4->hang_state = kernel_state;
281 		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
282 	}
283 }
284 
285 static void
286 vc4_reset(struct drm_device *dev)
287 {
288 	struct vc4_dev *vc4 = to_vc4_dev(dev);
289 
290 	DRM_INFO("Resetting GPU.\n");
291 
292 	mutex_lock(&vc4->power_lock);
293 	if (vc4->power_refcount) {
294 		/* Power the device off and back on by dropping the
295 		 * reference on runtime PM.
296 		 */
297 		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
298 		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
299 	}
300 	mutex_unlock(&vc4->power_lock);
301 
302 	vc4_irq_reset(dev);
303 
304 	/* Rearm the hangcheck -- another job might have been waiting
305 	 * for our hung one to get kicked off, and vc4_irq_reset()
306 	 * would have started it.
307 	 */
308 	vc4_queue_hangcheck(dev);
309 }
310 
311 static void
312 vc4_reset_work(struct work_struct *work)
313 {
314 	struct vc4_dev *vc4 =
315 		container_of(work, struct vc4_dev, hangcheck.reset_work);
316 
317 	vc4_save_hang_state(&vc4->base);
318 
319 	vc4_reset(&vc4->base);
320 }
321 
322 static void
323 vc4_hangcheck_elapsed(struct timer_list *t)
324 {
325 	struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
326 	struct drm_device *dev = &vc4->base;
327 	uint32_t ct0ca, ct1ca;
328 	unsigned long irqflags;
329 	struct vc4_exec_info *bin_exec, *render_exec;
330 
331 	spin_lock_irqsave(&vc4->job_lock, irqflags);
332 
333 	bin_exec = vc4_first_bin_job(vc4);
334 	render_exec = vc4_first_render_job(vc4);
335 
336 	/* If idle, we can stop watching for hangs. */
337 	if (!bin_exec && !render_exec) {
338 		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
339 		return;
340 	}
341 
342 	ct0ca = V3D_READ(V3D_CTNCA(0));
343 	ct1ca = V3D_READ(V3D_CTNCA(1));
344 
345 	/* If we've made any progress in execution, rearm the timer
346 	 * and wait.
347 	 */
348 	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
349 	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
350 		if (bin_exec)
351 			bin_exec->last_ct0ca = ct0ca;
352 		if (render_exec)
353 			render_exec->last_ct1ca = ct1ca;
354 		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
355 		vc4_queue_hangcheck(dev);
356 		return;
357 	}
358 
359 	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
360 
361 	/* We've gone too long with no progress; reset.  This has to
362 	 * be done from a work struct, since resetting can sleep and
363 	 * this timer hook isn't allowed to.
364 	 */
365 	schedule_work(&vc4->hangcheck.reset_work);
366 }
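/* To summarize the hangcheck flow above: vc4_queue_hangcheck() arms a
 * 100 ms timer whenever a job is kicked off; vc4_hangcheck_elapsed()
 * then compares the CT0CA/CT1CA program counters against the values it
 * saw last time, and only if neither thread has advanced does it punt
 * to vc4_reset_work(), which is allowed to sleep, to capture the hang
 * state and reset the V3D block.
 */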
367 
368 static void
369 submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
370 {
371 	struct vc4_dev *vc4 = to_vc4_dev(dev);
372 
373 	/* Set the current and end address of the control list.
374 	 * Writing the end register is what starts the job.
375 	 */
376 	V3D_WRITE(V3D_CTNCA(thread), start);
377 	V3D_WRITE(V3D_CTNEA(thread), end);
378 }
379 
380 int
381 vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
382 		   bool interruptible)
383 {
384 	struct vc4_dev *vc4 = to_vc4_dev(dev);
385 	int ret = 0;
386 	unsigned long timeout_expire;
387 	DEFINE_WAIT(wait);
388 
389 	if (vc4->finished_seqno >= seqno)
390 		return 0;
391 
392 	if (timeout_ns == 0)
393 		return -ETIME;
394 
395 	timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);
396 
397 	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
398 	for (;;) {
399 		prepare_to_wait(&vc4->job_wait_queue, &wait,
400 				interruptible ? TASK_INTERRUPTIBLE :
401 				TASK_UNINTERRUPTIBLE);
402 
403 		if (interruptible && signal_pending(current)) {
404 			ret = -ERESTARTSYS;
405 			break;
406 		}
407 
408 		if (vc4->finished_seqno >= seqno)
409 			break;
410 
411 		if (timeout_ns != ~0ull) {
412 			if (time_after_eq(jiffies, timeout_expire)) {
413 				ret = -ETIME;
414 				break;
415 			}
416 			schedule_timeout(timeout_expire - jiffies);
417 		} else {
418 			schedule();
419 		}
420 	}
421 
422 	finish_wait(&vc4->job_wait_queue, &wait);
423 	trace_vc4_wait_for_seqno_end(dev, seqno);
424 
425 	return ret;
426 }
427 
428 static void
429 vc4_flush_caches(struct drm_device *dev)
430 {
431 	struct vc4_dev *vc4 = to_vc4_dev(dev);
432 
433 	/* Flush the GPU L2 caches.  These caches sit on top of system
434 	 * L3 (the 128KB or so shared with the CPU), and are
435 	 * non-allocating in the L3.
436 	 */
437 	V3D_WRITE(V3D_L2CACTL,
438 		  V3D_L2CACTL_L2CCLR);
439 
440 	V3D_WRITE(V3D_SLCACTL,
441 		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
442 		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
443 		  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
444 		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
445 }
446 
447 static void
448 vc4_flush_texture_caches(struct drm_device *dev)
449 {
450 	struct vc4_dev *vc4 = to_vc4_dev(dev);
451 
452 	V3D_WRITE(V3D_L2CACTL,
453 		  V3D_L2CACTL_L2CCLR);
454 
455 	V3D_WRITE(V3D_SLCACTL,
456 		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
457 		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
458 }
459 
460 /* Sets the registers for the next job to actually be executed in
461  * the hardware.
462  *
463  * The job_lock should be held during this.
464  */
465 void
466 vc4_submit_next_bin_job(struct drm_device *dev)
467 {
468 	struct vc4_dev *vc4 = to_vc4_dev(dev);
469 	struct vc4_exec_info *exec;
470 
471 again:
472 	exec = vc4_first_bin_job(vc4);
473 	if (!exec)
474 		return;
475 
476 	vc4_flush_caches(dev);
477 
478 	/* Only start the perfmon if it was not already started by a previous
479 	 * job.
480 	 */
481 	if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
482 		vc4_perfmon_start(vc4, exec->perfmon);
483 
484 	/* Either put the job in the binner if it uses the binner, or
485 	 * immediately move it to the to-be-rendered queue.
486 	 */
487 	if (exec->ct0ca != exec->ct0ea) {
488 		trace_vc4_submit_cl(dev, false, exec->seqno, exec->ct0ca,
489 				    exec->ct0ea);
490 		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
491 	} else {
492 		struct vc4_exec_info *next;
493 
494 		vc4_move_job_to_render(dev, exec);
495 		next = vc4_first_bin_job(vc4);
496 
497 		/* We can't start the next bin job if the previous job had a
498 		 * different perfmon instance attached to it. The same goes
499 		 * if one of them has a perfmon attached to it and the other
500 		 * one doesn't.
501 		 */
502 		if (next && next->perfmon == exec->perfmon)
503 			goto again;
504 	}
505 }
506 
507 void
508 vc4_submit_next_render_job(struct drm_device *dev)
509 {
510 	struct vc4_dev *vc4 = to_vc4_dev(dev);
511 	struct vc4_exec_info *exec = vc4_first_render_job(vc4);
512 
513 	if (!exec)
514 		return;
515 
516 	/* A previous RCL may have written to one of our textures, and
517 	 * our full cache flush at bin time may have occurred before
518 	 * that RCL completed.  Flush the texture cache now, but not
519 	 * the instructions or uniforms (since we don't write those
520 	 * from an RCL).
521 	 */
522 	vc4_flush_texture_caches(dev);
523 
524 	trace_vc4_submit_cl(dev, true, exec->seqno, exec->ct1ca, exec->ct1ea);
525 	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
526 }
527 
528 void
529 vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
530 {
531 	struct vc4_dev *vc4 = to_vc4_dev(dev);
532 	bool was_empty = list_empty(&vc4->render_job_list);
533 
534 	list_move_tail(&exec->head, &vc4->render_job_list);
535 	if (was_empty)
536 		vc4_submit_next_render_job(dev);
537 }
538 
539 static void
540 vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
541 {
542 	struct vc4_bo *bo;
543 	unsigned i;
544 
545 	for (i = 0; i < exec->bo_count; i++) {
546 		bo = to_vc4_bo(&exec->bo[i]->base);
547 		bo->seqno = seqno;
548 
549 		dma_resv_add_fence(bo->base.base.resv, exec->fence,
550 				   DMA_RESV_USAGE_READ);
551 	}
552 
553 	list_for_each_entry(bo, &exec->unref_list, unref_head) {
554 		bo->seqno = seqno;
555 	}
556 
557 	for (i = 0; i < exec->rcl_write_bo_count; i++) {
558 		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
559 		bo->write_seqno = seqno;
560 
561 		dma_resv_add_fence(bo->base.base.resv, exec->fence,
562 				   DMA_RESV_USAGE_WRITE);
563 	}
564 }
565 
566 static void
567 vc4_unlock_bo_reservations(struct drm_device *dev,
568 			   struct vc4_exec_info *exec,
569 			   struct ww_acquire_ctx *acquire_ctx)
570 {
571 	int i;
572 
573 	for (i = 0; i < exec->bo_count; i++) {
574 		struct drm_gem_object *bo = &exec->bo[i]->base;
575 
576 		dma_resv_unlock(bo->resv);
577 	}
578 
579 	ww_acquire_fini(acquire_ctx);
580 }
581 
582 /* Takes the reservation lock on all the BOs being referenced, so that
583  * at queue submit time we can update the reservations.
584  *
585  * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
586  * (all of which are on exec->unref_list).  They're entirely private
587  * to vc4, so we don't attach dma-buf fences to them.
588  */
589 static int
590 vc4_lock_bo_reservations(struct drm_device *dev,
591 			 struct vc4_exec_info *exec,
592 			 struct ww_acquire_ctx *acquire_ctx)
593 {
594 	int contended_lock = -1;
595 	int i, ret;
596 	struct drm_gem_object *bo;
597 
598 	ww_acquire_init(acquire_ctx, &reservation_ww_class);
599 
600 retry:
601 	if (contended_lock != -1) {
602 		bo = &exec->bo[contended_lock]->base;
603 		ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx);
604 		if (ret) {
605 			ww_acquire_done(acquire_ctx);
606 			return ret;
607 		}
608 	}
609 
610 	for (i = 0; i < exec->bo_count; i++) {
611 		if (i == contended_lock)
612 			continue;
613 
614 		bo = &exec->bo[i]->base;
615 
616 		ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx);
617 		if (ret) {
618 			int j;
619 
620 			for (j = 0; j < i; j++) {
621 				bo = &exec->bo[j]->base;
622 				dma_resv_unlock(bo->resv);
623 			}
624 
625 			if (contended_lock != -1 && contended_lock >= i) {
626 				bo = &exec->bo[contended_lock]->base;
627 
628 				dma_resv_unlock(bo->resv);
629 			}
630 
631 			if (ret == -EDEADLK) {
632 				contended_lock = i;
633 				goto retry;
634 			}
635 
636 			ww_acquire_done(acquire_ctx);
637 			return ret;
638 		}
639 	}
640 
641 	ww_acquire_done(acquire_ctx);
642 
643 	/* Reserve space for our shared (read-only) fence references,
644 	 * before we commit the CL to the hardware.
645 	 */
646 	for (i = 0; i < exec->bo_count; i++) {
647 		bo = &exec->bo[i]->base;
648 
649 		ret = dma_resv_reserve_fences(bo->resv, 1);
650 		if (ret) {
651 			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
652 			return ret;
653 		}
654 	}
655 
656 	return 0;
657 }
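/* Note on the locking loop above: dma_resv locks are ww_mutexes, so a
 * contended lock returns -EDEADLK instead of deadlocking.  The function
 * then drops every lock it holds, takes the contended one with the
 * _slow variant, and retries the whole list, which is the standard
 * wait/wound backoff pattern for acquiring multiple reservations.
 */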
658 
659 /* Queues a struct vc4_exec_info for execution.  If no job is
660  * currently executing, then submits it.
661  *
662  * Unlike most GPUs, our hardware only handles one command list at a
663  * time.  To queue multiple jobs at once, we'd need to edit the
664  * previous command list to have a jump to the new one at the end, and
665  * then bump the end address.  That's a change for a later date,
666  * though.
667  */
668 static int
669 vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
670 		 struct ww_acquire_ctx *acquire_ctx,
671 		 struct drm_syncobj *out_sync)
672 {
673 	struct vc4_dev *vc4 = to_vc4_dev(dev);
674 	struct vc4_exec_info *renderjob;
675 	uint64_t seqno;
676 	unsigned long irqflags;
677 	struct vc4_fence *fence;
678 
679 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
680 	if (!fence)
681 		return -ENOMEM;
682 	fence->dev = dev;
683 
684 	spin_lock_irqsave(&vc4->job_lock, irqflags);
685 
686 	seqno = ++vc4->emit_seqno;
687 	exec->seqno = seqno;
688 
689 	dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
690 		       vc4->dma_fence_context, exec->seqno);
691 	fence->seqno = exec->seqno;
692 	exec->fence = &fence->base;
693 
694 	if (out_sync)
695 		drm_syncobj_replace_fence(out_sync, exec->fence);
696 
697 	vc4_update_bo_seqnos(exec, seqno);
698 
699 	vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
700 
701 	list_add_tail(&exec->head, &vc4->bin_job_list);
702 
703 	/* If no bin job was executing and the render job (if any) has the
704 	 * same perfmon attached as our job (or if neither job has a perfmon
705 	 * attached), then kick ours off.  Otherwise, it'll get
706 	 * started when the previous job's flush/render done interrupt occurs.
707 	 */
708 	renderjob = vc4_first_render_job(vc4);
709 	if (vc4_first_bin_job(vc4) == exec &&
710 	    (!renderjob || renderjob->perfmon == exec->perfmon)) {
711 		vc4_submit_next_bin_job(dev);
712 		vc4_queue_hangcheck(dev);
713 	}
714 
715 	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
716 
717 	return 0;
718 }
719 
720 /**
721  * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
722  * referenced by the job.
723  * @dev: DRM device
724  * @file_priv: DRM file for this fd
725  * @exec: V3D job being set up
726  *
727  * The command validator needs to reference BOs by their index within
728  * the submitted job's BO list.  This does the validation of the job's
729  * BO list and reference counting for the lifetime of the job.
730  */
731 static int
732 vc4_cl_lookup_bos(struct drm_device *dev,
733 		  struct drm_file *file_priv,
734 		  struct vc4_exec_info *exec)
735 {
736 	struct drm_vc4_submit_cl *args = exec->args;
737 	uint32_t *handles;
738 	int ret = 0;
739 	int i;
740 
741 	exec->bo_count = args->bo_handle_count;
742 
743 	if (!exec->bo_count) {
744 		/* See comment on bo_index for why we have to check
745 		 * this.
746 		 */
747 		DRM_DEBUG("Rendering requires BOs to validate\n");
748 		return -EINVAL;
749 	}
750 
751 	exec->bo = kvmalloc_array(exec->bo_count,
752 				    sizeof(struct drm_gem_cma_object *),
753 				    GFP_KERNEL | __GFP_ZERO);
754 	if (!exec->bo) {
755 		DRM_ERROR("Failed to allocate validated BO pointers\n");
756 		return -ENOMEM;
757 	}
758 
759 	handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
760 	if (!handles) {
761 		ret = -ENOMEM;
762 		DRM_ERROR("Failed to allocate incoming GEM handles\n");
763 		goto fail;
764 	}
765 
766 	if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
767 			   exec->bo_count * sizeof(uint32_t))) {
768 		ret = -EFAULT;
769 		DRM_ERROR("Failed to copy in GEM handles\n");
770 		goto fail;
771 	}
772 
773 	spin_lock(&file_priv->table_lock);
774 	for (i = 0; i < exec->bo_count; i++) {
775 		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
776 						     handles[i]);
777 		if (!bo) {
778 			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
779 				  i, handles[i]);
780 			ret = -EINVAL;
781 			break;
782 		}
783 
784 		drm_gem_object_get(bo);
785 		exec->bo[i] = (struct drm_gem_cma_object *)bo;
786 	}
787 	spin_unlock(&file_priv->table_lock);
788 
789 	if (ret)
790 		goto fail_put_bo;
791 
792 	for (i = 0; i < exec->bo_count; i++) {
793 		ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
794 		if (ret)
795 			goto fail_dec_usecnt;
796 	}
797 
798 	kvfree(handles);
799 	return 0;
800 
801 fail_dec_usecnt:
802 	/* Decrease usecnt on acquired objects.
803 	 * We cannot rely on vc4_complete_exec() to release resources here,
804 	 * because vc4_complete_exec() has no information about which BO has
805 	 * had its ->usecnt incremented.
806 	 * To make things easier we just free everything explicitly and set
807 	 * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
808 	 * step.
809 	 */
810 	for (i-- ; i >= 0; i--)
811 		vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));
812 
813 fail_put_bo:
814 	/* Release any reference to acquired objects. */
815 	for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
816 		drm_gem_object_put(&exec->bo[i]->base);
817 
818 fail:
819 	kvfree(handles);
820 	kvfree(exec->bo);
821 	exec->bo = NULL;
822 	return ret;
823 }
824 
825 static int
826 vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
827 {
828 	struct drm_vc4_submit_cl *args = exec->args;
829 	struct vc4_dev *vc4 = to_vc4_dev(dev);
830 	void *temp = NULL;
831 	void *bin;
832 	int ret = 0;
833 	uint32_t bin_offset = 0;
834 	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
835 					     16);
836 	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
837 	uint32_t exec_size = uniforms_offset + args->uniforms_size;
838 	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
839 					  args->shader_rec_count);
840 	struct vc4_bo *bo;
841 
842 	if (shader_rec_offset < args->bin_cl_size ||
843 	    uniforms_offset < shader_rec_offset ||
844 	    exec_size < uniforms_offset ||
845 	    args->shader_rec_count >= (UINT_MAX /
846 					  sizeof(struct vc4_shader_state)) ||
847 	    temp_size < exec_size) {
848 		DRM_DEBUG("overflow in exec arguments\n");
849 		ret = -EINVAL;
850 		goto fail;
851 	}
852 
853 	/* Allocate space where we'll store the copied in user command lists
854 	 * and shader records.
855 	 *
856 	 * We don't just copy directly into the BOs because we need to
857 	 * read the contents back for validation, and access through
858 	 * bo->vaddr is likely uncached.
859 	 */
860 	temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
861 	if (!temp) {
862 		DRM_ERROR("Failed to allocate storage for copying "
863 			  "in bin/render CLs.\n");
864 		ret = -ENOMEM;
865 		goto fail;
866 	}
867 	bin = temp + bin_offset;
868 	exec->shader_rec_u = temp + shader_rec_offset;
869 	exec->uniforms_u = temp + uniforms_offset;
870 	exec->shader_state = temp + exec_size;
871 	exec->shader_state_size = args->shader_rec_count;
872 
873 	if (copy_from_user(bin,
874 			   u64_to_user_ptr(args->bin_cl),
875 			   args->bin_cl_size)) {
876 		ret = -EFAULT;
877 		goto fail;
878 	}
879 
880 	if (copy_from_user(exec->shader_rec_u,
881 			   u64_to_user_ptr(args->shader_rec),
882 			   args->shader_rec_size)) {
883 		ret = -EFAULT;
884 		goto fail;
885 	}
886 
887 	if (copy_from_user(exec->uniforms_u,
888 			   u64_to_user_ptr(args->uniforms),
889 			   args->uniforms_size)) {
890 		ret = -EFAULT;
891 		goto fail;
892 	}
893 
894 	bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
895 	if (IS_ERR(bo)) {
896 		DRM_ERROR("Couldn't allocate BO for binning\n");
897 		ret = PTR_ERR(bo);
898 		goto fail;
899 	}
900 	exec->exec_bo = &bo->base;
901 
902 	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
903 		      &exec->unref_list);
904 
905 	exec->ct0ca = exec->exec_bo->paddr + bin_offset;
906 
907 	exec->bin_u = bin;
908 
909 	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
910 	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
911 	exec->shader_rec_size = args->shader_rec_size;
912 
913 	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
914 	exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
915 	exec->uniforms_size = args->uniforms_size;
916 
917 	ret = vc4_validate_bin_cl(dev,
918 				  exec->exec_bo->vaddr + bin_offset,
919 				  bin,
920 				  exec);
921 	if (ret)
922 		goto fail;
923 
924 	ret = vc4_validate_shader_recs(dev, exec);
925 	if (ret)
926 		goto fail;
927 
928 	if (exec->found_tile_binning_mode_config_packet) {
929 		ret = vc4_v3d_bin_bo_get(vc4, &exec->bin_bo_used);
930 		if (ret)
931 			goto fail;
932 	}
933 
934 	/* Block waiting on any previous rendering into the CS's VBO,
935 	 * IB, or textures, so that pixels are actually written by the
936 	 * time we try to read them.
937 	 */
938 	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);
939 
940 fail:
941 	kvfree(temp);
942 	return ret;
943 }
944 
945 static void
946 vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
947 {
948 	struct vc4_dev *vc4 = to_vc4_dev(dev);
949 	unsigned long irqflags;
950 	unsigned i;
951 
952 	/* If we got force-completed because of GPU reset rather than
953 	 * through our IRQ handler, signal the fence now.
954 	 */
955 	if (exec->fence) {
956 		dma_fence_signal(exec->fence);
957 		dma_fence_put(exec->fence);
958 	}
959 
960 	if (exec->bo) {
961 		for (i = 0; i < exec->bo_count; i++) {
962 			struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);
963 
964 			vc4_bo_dec_usecnt(bo);
965 			drm_gem_object_put(&exec->bo[i]->base);
966 		}
967 		kvfree(exec->bo);
968 	}
969 
970 	while (!list_empty(&exec->unref_list)) {
971 		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
972 						     struct vc4_bo, unref_head);
973 		list_del(&bo->unref_head);
974 		drm_gem_object_put(&bo->base.base);
975 	}
976 
977 	/* Free up the allocation of any bin slots we used. */
978 	spin_lock_irqsave(&vc4->job_lock, irqflags);
979 	vc4->bin_alloc_used &= ~exec->bin_slots;
980 	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
981 
982 	/* Release the reference on the binner BO if needed. */
983 	if (exec->bin_bo_used)
984 		vc4_v3d_bin_bo_put(vc4);
985 
986 	/* Release the reference we had on the perf monitor. */
987 	vc4_perfmon_put(exec->perfmon);
988 
989 	vc4_v3d_pm_put(vc4);
990 
991 	kfree(exec);
992 }
993 
994 void
995 vc4_job_handle_completed(struct vc4_dev *vc4)
996 {
997 	unsigned long irqflags;
998 	struct vc4_seqno_cb *cb, *cb_temp;
999 
1000 	spin_lock_irqsave(&vc4->job_lock, irqflags);
1001 	while (!list_empty(&vc4->job_done_list)) {
1002 		struct vc4_exec_info *exec =
1003 			list_first_entry(&vc4->job_done_list,
1004 					 struct vc4_exec_info, head);
1005 		list_del(&exec->head);
1006 
1007 		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
1008 		vc4_complete_exec(&vc4->base, exec);
1009 		spin_lock_irqsave(&vc4->job_lock, irqflags);
1010 	}
1011 
1012 	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
1013 		if (cb->seqno <= vc4->finished_seqno) {
1014 			list_del_init(&cb->work.entry);
1015 			schedule_work(&cb->work);
1016 		}
1017 	}
1018 
1019 	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
1020 }
1021 
1022 static void vc4_seqno_cb_work(struct work_struct *work)
1023 {
1024 	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);
1025 
1026 	cb->func(cb);
1027 }
1028 
1029 int vc4_queue_seqno_cb(struct drm_device *dev,
1030 		       struct vc4_seqno_cb *cb, uint64_t seqno,
1031 		       void (*func)(struct vc4_seqno_cb *cb))
1032 {
1033 	struct vc4_dev *vc4 = to_vc4_dev(dev);
1034 	unsigned long irqflags;
1035 
1036 	cb->func = func;
1037 	INIT_WORK(&cb->work, vc4_seqno_cb_work);
1038 
1039 	spin_lock_irqsave(&vc4->job_lock, irqflags);
1040 	if (seqno > vc4->finished_seqno) {
1041 		cb->seqno = seqno;
1042 		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
1043 	} else {
1044 		schedule_work(&cb->work);
1045 	}
1046 	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
1047 
1048 	return 0;
1049 }
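/* A hedged sketch of how a caller might use vc4_queue_seqno_cb(); the
 * struct and function names (my_flip_state, my_flip_done) are hypothetical,
 * only struct vc4_seqno_cb and the call itself come from this driver:
 *
 *	struct my_flip_state {
 *		struct vc4_seqno_cb cb;
 *	};
 *
 *	static void my_flip_done(struct vc4_seqno_cb *cb)
 *	{
 *		struct my_flip_state *state =
 *			container_of(cb, struct my_flip_state, cb);
 *
 *		// Runs from a workqueue once finished_seqno has reached the
 *		// seqno passed in below (or immediately if it already had).
 *	}
 *
 *	vc4_queue_seqno_cb(dev, &state->cb, bo->seqno, my_flip_done);
 */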
1050 
1051 /* Scheduled when any job has been completed, this walks the list of
1052  * jobs that have completed, unrefs their BOs, and frees their exec
1053  * structs.
1054  */
1055 static void
1056 vc4_job_done_work(struct work_struct *work)
1057 {
1058 	struct vc4_dev *vc4 =
1059 		container_of(work, struct vc4_dev, job_done_work);
1060 
1061 	vc4_job_handle_completed(vc4);
1062 }
1063 
1064 static int
1065 vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
1066 				uint64_t seqno,
1067 				uint64_t *timeout_ns)
1068 {
1069 	unsigned long start = jiffies;
1070 	int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);
1071 
1072 	if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
1073 		uint64_t delta = jiffies_to_nsecs(jiffies - start);
1074 
1075 		if (*timeout_ns >= delta)
1076 			*timeout_ns -= delta;
1077 	}
1078 
1079 	return ret;
1080 }
1081 
1082 int
1083 vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
1084 		     struct drm_file *file_priv)
1085 {
1086 	struct drm_vc4_wait_seqno *args = data;
1087 
1088 	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
1089 					       &args->timeout_ns);
1090 }
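/* A minimal userspace sketch, assuming the DRM_IOCTL_VC4_WAIT_SEQNO request
 * and struct drm_vc4_wait_seqno from uapi/drm/vc4_drm.h: wait up to one
 * second for the seqno returned by a previous submit to retire.
 *
 *	struct drm_vc4_wait_seqno wait = {
 *		.seqno = submit.seqno,
 *		.timeout_ns = 1000000000ull,
 *	};
 *
 *	ret = ioctl(fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait);
 *	// ETIME means the timeout expired; if the wait was interrupted by a
 *	// signal, timeout_ns has been reduced by the time already spent, so
 *	// a restarted call does not extend the overall wait.
 */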
1091 
1092 int
1093 vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
1094 		  struct drm_file *file_priv)
1095 {
1096 	int ret;
1097 	struct drm_vc4_wait_bo *args = data;
1098 	struct drm_gem_object *gem_obj;
1099 	struct vc4_bo *bo;
1100 
1101 	if (args->pad != 0)
1102 		return -EINVAL;
1103 
1104 	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
1105 	if (!gem_obj) {
1106 		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
1107 		return -EINVAL;
1108 	}
1109 	bo = to_vc4_bo(gem_obj);
1110 
1111 	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
1112 					      &args->timeout_ns);
1113 
1114 	drm_gem_object_put(gem_obj);
1115 	return ret;
1116 }
1117 
1118 /**
1119  * vc4_submit_cl_ioctl() - Submits a job (frame) to the VC4.
1120  * @dev: DRM device
1121  * @data: ioctl argument
1122  * @file_priv: DRM file for this fd
1123  *
1124  * This is the main entrypoint for userspace to submit a 3D frame to
1125  * the GPU.  Userspace provides the binner command list (if
1126  * applicable), and the kernel sets up the render command list to draw
1127  * to the framebuffer described in the ioctl, using the command lists
1128  * that the 3D engine's binner will produce.
1129  */
1130 int
1131 vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
1132 		    struct drm_file *file_priv)
1133 {
1134 	struct vc4_dev *vc4 = to_vc4_dev(dev);
1135 	struct vc4_file *vc4file = file_priv->driver_priv;
1136 	struct drm_vc4_submit_cl *args = data;
1137 	struct drm_syncobj *out_sync = NULL;
1138 	struct vc4_exec_info *exec;
1139 	struct ww_acquire_ctx acquire_ctx;
1140 	struct dma_fence *in_fence;
1141 	int ret = 0;
1142 
1143 	trace_vc4_submit_cl_ioctl(dev, args->bin_cl_size,
1144 				  args->shader_rec_size,
1145 				  args->bo_handle_count);
1146 
1147 	if (!vc4->v3d) {
1148 		DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n");
1149 		return -ENODEV;
1150 	}
1151 
1152 	if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
1153 			     VC4_SUBMIT_CL_FIXED_RCL_ORDER |
1154 			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
1155 			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) {
1156 		DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
1157 		return -EINVAL;
1158 	}
1159 
1160 	if (args->pad2 != 0) {
1161 		DRM_DEBUG("Invalid pad: 0x%08x\n", args->pad2);
1162 		return -EINVAL;
1163 	}
1164 
1165 	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
1166 	if (!exec) {
1167 		DRM_ERROR("malloc failure on exec struct\n");
1168 		return -ENOMEM;
1169 	}
1170 
1171 	ret = vc4_v3d_pm_get(vc4);
1172 	if (ret) {
1173 		kfree(exec);
1174 		return ret;
1175 	}
1176 
1177 	exec->args = args;
1178 	INIT_LIST_HEAD(&exec->unref_list);
1179 
1180 	ret = vc4_cl_lookup_bos(dev, file_priv, exec);
1181 	if (ret)
1182 		goto fail;
1183 
1184 	if (args->perfmonid) {
1185 		exec->perfmon = vc4_perfmon_find(vc4file,
1186 						 args->perfmonid);
1187 		if (!exec->perfmon) {
1188 			ret = -ENOENT;
1189 			goto fail;
1190 		}
1191 	}
1192 
1193 	if (args->in_sync) {
1194 		ret = drm_syncobj_find_fence(file_priv, args->in_sync,
1195 					     0, 0, &in_fence);
1196 		if (ret)
1197 			goto fail;
1198 
1199 		/* When the fence (or fence array) is exclusively from our
1200 		 * context, we can skip the wait, since jobs are executed in
1201 		 * order of their submission through this ioctl and this can
1202 		 * only have fences from a prior job.
1203 		 */
1204 		if (!dma_fence_match_context(in_fence,
1205 					     vc4->dma_fence_context)) {
1206 			ret = dma_fence_wait(in_fence, true);
1207 			if (ret) {
1208 				dma_fence_put(in_fence);
1209 				goto fail;
1210 			}
1211 		}
1212 
1213 		dma_fence_put(in_fence);
1214 	}
1215 
1216 	if (exec->args->bin_cl_size != 0) {
1217 		ret = vc4_get_bcl(dev, exec);
1218 		if (ret)
1219 			goto fail;
1220 	} else {
1221 		exec->ct0ca = 0;
1222 		exec->ct0ea = 0;
1223 	}
1224 
1225 	ret = vc4_get_rcl(dev, exec);
1226 	if (ret)
1227 		goto fail;
1228 
1229 	ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
1230 	if (ret)
1231 		goto fail;
1232 
1233 	if (args->out_sync) {
1234 		out_sync = drm_syncobj_find(file_priv, args->out_sync);
1235 		if (!out_sync) {
1236 			ret = -EINVAL;
1237 			goto fail;
1238 		}
1239 
1240 		/* We replace the fence in out_sync in vc4_queue_submit since
1241 		 * the render job could execute immediately after that call.
1242 		 * If it finishes before our ioctl processing resumes the
1243 		 * render job fence could already have been freed.
1244 		 */
1245 	}
1246 
1247 	/* Clear this out of the struct we'll be putting in the queue,
1248 	 * since it's part of our stack.
1249 	 */
1250 	exec->args = NULL;
1251 
1252 	ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync);
1253 
1254 	/* The syncobj isn't part of the exec data and we need to free our
1255 	 * reference even if job submission failed.
1256 	 */
1257 	if (out_sync)
1258 		drm_syncobj_put(out_sync);
1259 
1260 	if (ret)
1261 		goto fail;
1262 
1263 	/* Return the seqno for our job. */
1264 	args->seqno = vc4->emit_seqno;
1265 
1266 	return 0;
1267 
1268 fail:
1269 	vc4_complete_exec(&vc4->base, exec);
1270 
1271 	return ret;
1272 }
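/* A rough userspace sketch of a submission, assuming the
 * DRM_IOCTL_VC4_SUBMIT_CL request and struct drm_vc4_submit_cl from
 * uapi/drm/vc4_drm.h.  The binner CL, shader records, uniforms and BO
 * handle table would be produced by the userspace driver, and the
 * render-target description consumed by vc4_get_rcl() is omitted here:
 *
 *	struct drm_vc4_submit_cl submit = {
 *		.bin_cl = (uintptr_t)bin_cl,
 *		.bin_cl_size = bin_cl_size,
 *		.shader_rec = (uintptr_t)shader_rec,
 *		.shader_rec_size = shader_rec_size,
 *		.shader_rec_count = shader_rec_count,
 *		.uniforms = (uintptr_t)uniforms,
 *		.uniforms_size = uniforms_size,
 *		.bo_handles = (uintptr_t)bo_handles,
 *		.bo_handle_count = bo_handle_count,
 *	};
 *
 *	ret = ioctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
 *	// On success, submit.seqno identifies this job for the wait ioctls.
 */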
1273 
1274 static void vc4_gem_destroy(struct drm_device *dev, void *unused);
1275 int vc4_gem_init(struct drm_device *dev)
1276 {
1277 	struct vc4_dev *vc4 = to_vc4_dev(dev);
1278 
1279 	vc4->dma_fence_context = dma_fence_context_alloc(1);
1280 
1281 	INIT_LIST_HEAD(&vc4->bin_job_list);
1282 	INIT_LIST_HEAD(&vc4->render_job_list);
1283 	INIT_LIST_HEAD(&vc4->job_done_list);
1284 	INIT_LIST_HEAD(&vc4->seqno_cb_list);
1285 	spin_lock_init(&vc4->job_lock);
1286 
1287 	INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
1288 	timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);
1289 
1290 	INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
1291 
1292 	mutex_init(&vc4->power_lock);
1293 
1294 	INIT_LIST_HEAD(&vc4->purgeable.list);
1295 	mutex_init(&vc4->purgeable.lock);
1296 
1297 	return drmm_add_action_or_reset(dev, vc4_gem_destroy, NULL);
1298 }
1299 
1300 static void vc4_gem_destroy(struct drm_device *dev, void *unused)
1301 {
1302 	struct vc4_dev *vc4 = to_vc4_dev(dev);
1303 
1304 	/* Waiting for exec to finish would need to be done before
1305 	 * unregistering V3D.
1306 	 */
1307 	WARN_ON(vc4->emit_seqno != vc4->finished_seqno);
1308 
1309 	/* V3D should already have disabled its interrupt and cleared
1310 	 * the overflow allocation registers.  Now free the object.
1311 	 */
1312 	if (vc4->bin_bo) {
1313 		drm_gem_object_put(&vc4->bin_bo->base.base);
1314 		vc4->bin_bo = NULL;
1315 	}
1316 
1317 	if (vc4->hang_state)
1318 		vc4_free_hang_state(dev, vc4->hang_state);
1319 }
1320 
1321 int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
1322 			  struct drm_file *file_priv)
1323 {
1324 	struct drm_vc4_gem_madvise *args = data;
1325 	struct drm_gem_object *gem_obj;
1326 	struct vc4_bo *bo;
1327 	int ret;
1328 
1329 	switch (args->madv) {
1330 	case VC4_MADV_DONTNEED:
1331 	case VC4_MADV_WILLNEED:
1332 		break;
1333 	default:
1334 		return -EINVAL;
1335 	}
1336 
1337 	if (args->pad != 0)
1338 		return -EINVAL;
1339 
1340 	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
1341 	if (!gem_obj) {
1342 		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
1343 		return -ENOENT;
1344 	}
1345 
1346 	bo = to_vc4_bo(gem_obj);
1347 
1348 	/* Only BOs exposed to userspace can be purged. */
1349 	if (bo->madv == __VC4_MADV_NOTSUPP) {
1350 		DRM_DEBUG("madvise not supported on this BO\n");
1351 		ret = -EINVAL;
1352 		goto out_put_gem;
1353 	}
1354 
1355 	/* Not sure it's safe to purge imported BOs. Let's just assume it's
1356 	 * not until proven otherwise.
1357 	 */
1358 	if (gem_obj->import_attach) {
1359 		DRM_DEBUG("madvise not supported on imported BOs\n");
1360 		ret = -EINVAL;
1361 		goto out_put_gem;
1362 	}
1363 
1364 	mutex_lock(&bo->madv_lock);
1365 
1366 	if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
1367 	    !refcount_read(&bo->usecnt)) {
1368 		/* If the BO is about to be marked as purgeable, is not used
1369 		 * and is not already purgeable or purged, add it to the
1370 		 * purgeable list.
1371 		 */
1372 		vc4_bo_add_to_purgeable_pool(bo);
1373 	} else if (args->madv == VC4_MADV_WILLNEED &&
1374 		   bo->madv == VC4_MADV_DONTNEED &&
1375 		   !refcount_read(&bo->usecnt)) {
1376 		/* The BO has not been purged yet, just remove it from
1377 		 * the purgeable list.
1378 		 */
1379 		vc4_bo_remove_from_purgeable_pool(bo);
1380 	}
1381 
1382 	/* Save the purged state. */
1383 	args->retained = bo->madv != __VC4_MADV_PURGED;
1384 
1385 	/* Update internal madv state only if the bo was not purged. */
1386 	if (bo->madv != __VC4_MADV_PURGED)
1387 		bo->madv = args->madv;
1388 
1389 	mutex_unlock(&bo->madv_lock);
1390 
1391 	ret = 0;
1392 
1393 out_put_gem:
1394 	drm_gem_object_put(gem_obj);
1395 
1396 	return ret;
1397 }
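/* A minimal userspace sketch, assuming the DRM_IOCTL_VC4_GEM_MADVISE request
 * and struct drm_vc4_gem_madvise from uapi/drm/vc4_drm.h: mark a cached BO
 * purgeable while idle, then check .retained when taking it back into use.
 *
 *	struct drm_vc4_gem_madvise madv = {
 *		.handle = bo_handle,
 *		.madv = VC4_MADV_DONTNEED,
 *	};
 *	ioctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &madv);
 *
 *	madv.madv = VC4_MADV_WILLNEED;
 *	ioctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &madv);
 *	if (!madv.retained) {
 *		// The kernel purged the backing pages while we didn't need
 *		// them; the contents must be re-uploaded before reuse.
 *	}
 */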
1398