/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>
#include <linux/dma-fence-array.h>

#include <drm/drm_syncobj.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"

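/* (Re)arms the hangcheck timer to fire roughly 100ms from now.  Called
 * when a job is kicked off, and rearmed from the timer handler itself
 * for as long as the hardware keeps making progress.
 */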
static void
vc4_queue_hangcheck(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mod_timer(&vc4->hangcheck.timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}

struct vc4_hang_state {
	struct drm_vc4_get_hang_state user_state;

	u32 bo_count;
	struct drm_gem_object **bo;
};

static void
vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
	unsigned int i;

	for (i = 0; i < state->user_state.bo_count; i++)
		drm_gem_object_put(state->bo[i]);

	kfree(state);
}

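/* Returns the hang state captured by vc4_save_hang_state() to userspace.
 * If the caller's BO array is too small, only the required bo_count is
 * reported back; otherwise the kernel's hang state is handed over (and
 * cleared), along with fresh GEM handles for each BO involved in the
 * hung jobs.
 */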
int
vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_hang_state *get_state = data;
	struct drm_vc4_get_hang_state_bo *bo_state;
	struct vc4_hang_state *kernel_state;
	struct drm_vc4_get_hang_state *state;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	u32 i;
	int ret = 0;

	if (!vc4->v3d) {
		DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	kernel_state = vc4->hang_state;
	if (!kernel_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return -ENOENT;
	}
	state = &kernel_state->user_state;

	/* If the user's array isn't big enough, just return the
	 * required array size.
	 */
	if (get_state->bo_count < state->bo_count) {
		get_state->bo_count = state->bo_count;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return 0;
	}

	vc4->hang_state = NULL;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
	state->bo = get_state->bo;
	memcpy(get_state, state, sizeof(*state));

	bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
	if (!bo_state) {
		ret = -ENOMEM;
		goto err_free;
	}

	for (i = 0; i < state->bo_count; i++) {
		struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
		u32 handle;

		ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
					    &handle);

		if (ret) {
			state->bo_count = i;
			goto err_delete_handle;
		}
		bo_state[i].handle = handle;
		bo_state[i].paddr = vc4_bo->base.paddr;
		bo_state[i].size = vc4_bo->base.base.size;
	}

	if (copy_to_user(u64_to_user_ptr(get_state->bo),
			 bo_state,
			 state->bo_count * sizeof(*bo_state)))
		ret = -EFAULT;

err_delete_handle:
	if (ret) {
		for (i = 0; i < state->bo_count; i++)
			drm_gem_handle_delete(file_priv, bo_state[i].handle);
	}

err_free:
	vc4_free_hang_state(dev, kernel_state);
	kfree(bo_state);

	return ret;
}

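/* Snapshots the state of the hung binner/render jobs (V3D register state
 * plus a reference to every BO they used) so that userspace can dump it
 * later through the VC4_GET_HANG_STATE ioctl.
 */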
static void
vc4_save_hang_state(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_hang_state *state;
	struct vc4_hang_state *kernel_state;
	struct vc4_exec_info *exec[2];
	struct vc4_bo *bo;
	unsigned long irqflags;
	unsigned int i, j, k, unref_list_count;

	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
	if (!kernel_state)
		return;

	state = &kernel_state->user_state;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	exec[0] = vc4_first_bin_job(vc4);
	exec[1] = vc4_first_render_job(vc4);
	if (!exec[0] && !exec[1]) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		kfree(kernel_state);
		return;
	}

	/* Get the bos from both binner and renderer into hang state. */
	state->bo_count = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		unref_list_count = 0;
		list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
			unref_list_count++;
		state->bo_count += exec[i]->bo_count + unref_list_count;
	}

	kernel_state->bo = kcalloc(state->bo_count,
				   sizeof(*kernel_state->bo), GFP_ATOMIC);

	if (!kernel_state->bo) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		kfree(kernel_state);
		return;
	}

	k = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		for (j = 0; j < exec[i]->bo_count; j++) {
			bo = to_vc4_bo(&exec[i]->bo[j]->base);

			/* Retain BOs just in case they were marked purgeable.
			 * This prevents the BO from being purged before
			 * someone had a chance to dump the hang state.
			 */
			WARN_ON(!refcount_read(&bo->usecnt));
			refcount_inc(&bo->usecnt);
			drm_gem_object_get(&exec[i]->bo[j]->base);
			kernel_state->bo[k++] = &exec[i]->bo[j]->base;
		}

		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
			/* No need to retain BOs coming from the ->unref_list
			 * because they are naturally unpurgeable.
			 */
			drm_gem_object_get(&bo->base.base);
			kernel_state->bo[k++] = &bo->base.base;
		}
	}

	WARN_ON_ONCE(k != state->bo_count);

	if (exec[0])
		state->start_bin = exec[0]->ct0ca;
	if (exec[1])
		state->start_render = exec[1]->ct1ca;

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	state->ct0ca = V3D_READ(V3D_CTNCA(0));
	state->ct0ea = V3D_READ(V3D_CTNEA(0));

	state->ct1ca = V3D_READ(V3D_CTNCA(1));
	state->ct1ea = V3D_READ(V3D_CTNEA(1));

	state->ct0cs = V3D_READ(V3D_CTNCS(0));
	state->ct1cs = V3D_READ(V3D_CTNCS(1));

	state->ct0ra0 = V3D_READ(V3D_CT00RA0);
	state->ct1ra0 = V3D_READ(V3D_CT01RA0);

	state->bpca = V3D_READ(V3D_BPCA);
	state->bpcs = V3D_READ(V3D_BPCS);
	state->bpoa = V3D_READ(V3D_BPOA);
	state->bpos = V3D_READ(V3D_BPOS);

	state->vpmbase = V3D_READ(V3D_VPMBASE);

	state->dbge = V3D_READ(V3D_DBGE);
	state->fdbgo = V3D_READ(V3D_FDBGO);
	state->fdbgb = V3D_READ(V3D_FDBGB);
	state->fdbgr = V3D_READ(V3D_FDBGR);
	state->fdbgs = V3D_READ(V3D_FDBGS);
	state->errstat = V3D_READ(V3D_ERRSTAT);

	/* We need to turn purgeable BOs into unpurgeable ones so that
	 * userspace has a chance to dump the hang state before the kernel
	 * decides to purge those BOs.
	 * Note that BO consistency at dump time cannot be guaranteed. For
	 * example, if the owner of these BOs decides to re-use them or mark
	 * them purgeable again there's nothing we can do to prevent it.
	 */
	for (i = 0; i < kernel_state->user_state.bo_count; i++) {
		struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);

		if (bo->madv == __VC4_MADV_NOTSUPP)
			continue;

		mutex_lock(&bo->madv_lock);
		if (!WARN_ON(bo->madv == __VC4_MADV_PURGED))
			bo->madv = VC4_MADV_WILLNEED;
		refcount_dec(&bo->usecnt);
		mutex_unlock(&bo->madv_lock);
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (vc4->hang_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_free_hang_state(dev, kernel_state);
	} else {
		vc4->hang_state = kernel_state;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	}
}

static void
vc4_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	DRM_INFO("Resetting GPU.\n");

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount) {
		/* Power the device off and back on by dropping the
		 * reference on runtime PM.
		 */
		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	vc4_irq_reset(dev);

	/* Rearm the hangcheck -- another job might have been waiting
	 * for our hung one to get kicked off, and vc4_irq_reset()
	 * would have started it.
	 */
	vc4_queue_hangcheck(dev);
}

static void
vc4_reset_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, hangcheck.reset_work);

	vc4_save_hang_state(&vc4->base);

	vc4_reset(&vc4->base);
}

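/* Timer callback that fires ~100ms after a job was started (or last made
 * progress).  If the binner/renderer control list addresses haven't
 * advanced since the last check, the GPU is considered hung and a reset
 * is scheduled.
 */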
static void
vc4_hangcheck_elapsed(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
	struct drm_device *dev = &vc4->base;
	uint32_t ct0ca, ct1ca;
	unsigned long irqflags;
	struct vc4_exec_info *bin_exec, *render_exec;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	bin_exec = vc4_first_bin_job(vc4);
	render_exec = vc4_first_render_job(vc4);

	/* If idle, we can stop watching for hangs. */
	if (!bin_exec && !render_exec) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	ct0ca = V3D_READ(V3D_CTNCA(0));
	ct1ca = V3D_READ(V3D_CTNCA(1));

	/* If we've made any progress in execution, rearm the timer
	 * and wait.
	 */
	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
		if (bin_exec)
			bin_exec->last_ct0ca = ct0ca;
		if (render_exec)
			render_exec->last_ct1ca = ct1ca;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_queue_hangcheck(dev);
		return;
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* We've gone too long with no progress, reset.  This has to
	 * be done from a work struct, since resetting can sleep and
	 * this timer hook isn't allowed to.
	 */
	schedule_work(&vc4->hangcheck.reset_work);
}

static void
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_WRITE(V3D_CTNCA(thread), start);
	V3D_WRITE(V3D_CTNEA(thread), end);
}

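/* Blocks until vc4->finished_seqno reaches @seqno or @timeout_ns expires.
 * A timeout of ~0ull means wait forever.  Returns -ETIME on timeout and
 * -ERESTARTSYS if an interruptible wait was broken by a signal.
 */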
int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
		   bool interruptible)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long timeout_expire;
	DEFINE_WAIT(wait);

	if (vc4->finished_seqno >= seqno)
		return 0;

	if (timeout_ns == 0)
		return -ETIME;

	timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);

	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
	for (;;) {
		prepare_to_wait(&vc4->job_wait_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE :
				TASK_UNINTERRUPTIBLE);

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (vc4->finished_seqno >= seqno)
			break;

		if (timeout_ns != ~0ull) {
			if (time_after_eq(jiffies, timeout_expire)) {
				ret = -ETIME;
				break;
			}
			schedule_timeout(timeout_expire - jiffies);
		} else {
			schedule();
		}
	}

	finish_wait(&vc4->job_wait_queue, &wait);
	trace_vc4_wait_for_seqno_end(dev, seqno);

	return ret;
}

static void
vc4_flush_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Flush the GPU L2 caches.  These caches sit on top of system
	 * L3 (the 128kb or so shared with the CPU), and are
	 * non-allocating in the L3.
	 */
	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

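/* Flushes only the GPU L2 and the T0/T1 texture caches.  Used before a
 * render job: a previous RCL may have written textures, but RCLs never
 * write instructions or uniforms, so those slice caches can be left
 * alone.
 */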
static void
vc4_flush_texture_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
}

/* Sets the registers for the next job to actually be executed in
 * the hardware.
 *
 * The job_lock should be held during this.
 */
void
vc4_submit_next_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec;

again:
	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

	vc4_flush_caches(dev);

	/* Only start the perfmon if it was not already started by a previous
	 * job.
	 */
	if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
		vc4_perfmon_start(vc4, exec->perfmon);

	/* Either put the job in the binner if it uses the binner, or
	 * immediately move it to the to-be-rendered queue.
	 */
	if (exec->ct0ca != exec->ct0ea) {
		trace_vc4_submit_cl(dev, false, exec->seqno, exec->ct0ca,
				    exec->ct0ea);
		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
	} else {
		struct vc4_exec_info *next;

		vc4_move_job_to_render(dev, exec);
		next = vc4_first_bin_job(vc4);

		/* We can't start the next bin job if the previous job had a
		 * different perfmon instance attached to it. The same goes
		 * if one of them had a perfmon attached to it and the other
		 * one doesn't.
		 */
		if (next && next->perfmon == exec->perfmon)
			goto again;
	}
}

void
vc4_submit_next_render_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_render_job(vc4);

	if (!exec)
		return;

	/* A previous RCL may have written to one of our textures, and
	 * our full cache flush at bin time may have occurred before
	 * that RCL completed.  Flush the texture cache now, but not
	 * the instructions or uniforms (since we don't write those
	 * from an RCL).
	 */
	vc4_flush_texture_caches(dev);

	trace_vc4_submit_cl(dev, true, exec->seqno, exec->ct1ca, exec->ct1ea);
	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}

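/* Moves a job from the binner queue to the render queue, kicking off
 * rendering immediately if the render queue was empty.
 */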
void
vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool was_empty = list_empty(&vc4->render_job_list);

	list_move_tail(&exec->head, &vc4->render_job_list);
	if (was_empty)
		vc4_submit_next_render_job(dev);
}

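/* Stamps every BO used by the job with its seqno and attaches the job's
 * fence to each BO's reservation object: a shared fence on every BO in
 * the job's list, plus an exclusive fence on the BOs the RCL writes to.
 */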
static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
	struct vc4_bo *bo;
	unsigned i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);
		bo->seqno = seqno;

		dma_resv_add_shared_fence(bo->base.base.resv, exec->fence);
	}

	list_for_each_entry(bo, &exec->unref_list, unref_head) {
		bo->seqno = seqno;
	}

	for (i = 0; i < exec->rcl_write_bo_count; i++) {
		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
		bo->write_seqno = seqno;

		dma_resv_add_excl_fence(bo->base.base.resv, exec->fence);
	}
}

static void
vc4_unlock_bo_reservations(struct drm_device *dev,
			   struct vc4_exec_info *exec,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = &exec->bo[i]->base;

		dma_resv_unlock(bo->resv);
	}

	ww_acquire_fini(acquire_ctx);
}

/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list).  They're entirely private
 * to vc4, so we don't attach dma-buf fences to them.
 */
static int
vc4_lock_bo_reservations(struct drm_device *dev,
			 struct vc4_exec_info *exec,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int contended_lock = -1;
	int i, ret;
	struct drm_gem_object *bo;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended_lock != -1) {
		bo = &exec->bo[contended_lock]->base;
		ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < exec->bo_count; i++) {
		if (i == contended_lock)
			continue;

		bo = &exec->bo[i]->base;

		ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++) {
				bo = &exec->bo[j]->base;
				dma_resv_unlock(bo->resv);
			}

			if (contended_lock != -1 && contended_lock >= i) {
				bo = &exec->bo[contended_lock]->base;

				dma_resv_unlock(bo->resv);
			}

			if (ret == -EDEADLK) {
				contended_lock = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	for (i = 0; i < exec->bo_count; i++) {
		bo = &exec->bo[i]->base;

		ret = dma_resv_reserve_shared(bo->resv, 1);
		if (ret) {
			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
			return ret;
		}
	}

	return 0;
}

/* Queues a struct vc4_exec_info for execution.  If no job is
 * currently executing, then submits it.
 *
 * Unlike most GPUs, our hardware only handles one command list at a
 * time.  To queue multiple jobs at once, we'd need to edit the
 * previous command list to have a jump to the new one at the end, and
 * then bump the end address.  That's a change for a later date,
 * though.
 */
static int
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
		 struct ww_acquire_ctx *acquire_ctx,
		 struct drm_syncobj *out_sync)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *renderjob;
	uint64_t seqno;
	unsigned long irqflags;
	struct vc4_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	fence->dev = dev;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	seqno = ++vc4->emit_seqno;
	exec->seqno = seqno;

	dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
		       vc4->dma_fence_context, exec->seqno);
	fence->seqno = exec->seqno;
	exec->fence = &fence->base;

	if (out_sync)
		drm_syncobj_replace_fence(out_sync, exec->fence);

	vc4_update_bo_seqnos(exec, seqno);

	vc4_unlock_bo_reservations(dev, exec, acquire_ctx);

	list_add_tail(&exec->head, &vc4->bin_job_list);

	/* If no bin job was executing and if the render job (if any) has the
	 * same perfmon as our job attached to it (or if both jobs don't have
	 * perfmon activated), then kick ours off.  Otherwise, it'll get
	 * started when the previous job's flush/render done interrupt occurs.
	 */
	renderjob = vc4_first_render_job(vc4);
	if (vc4_first_bin_job(vc4) == exec &&
	    (!renderjob || renderjob->perfmon == exec->perfmon)) {
		vc4_submit_next_bin_job(dev);
		vc4_queue_hangcheck(dev);
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return 0;
}

/**
 * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 */
static int
vc4_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	uint32_t *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs to validate\n");
		return -EINVAL;
	}

	exec->bo = kvmalloc_array(exec->bo_count,
				    sizeof(struct drm_gem_cma_object *),
				    GFP_KERNEL | __GFP_ZERO);
	if (!exec->bo) {
		DRM_ERROR("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_ERROR("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
			   exec->bo_count * sizeof(uint32_t))) {
		ret = -EFAULT;
		DRM_ERROR("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -EINVAL;
			break;
		}

		drm_gem_object_get(bo);
		exec->bo[i] = (struct drm_gem_cma_object *)bo;
	}
	spin_unlock(&file_priv->table_lock);

	if (ret)
		goto fail_put_bo;

	for (i = 0; i < exec->bo_count; i++) {
		ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
		if (ret)
			goto fail_dec_usecnt;
	}

	kvfree(handles);
	return 0;

fail_dec_usecnt:
	/* Decrease usecnt on acquired objects.
	 * We cannot rely on vc4_complete_exec() to release resources here,
	 * because vc4_complete_exec() has no information about which BO has
	 * had its ->usecnt incremented.
	 * To make things easier we just free everything explicitly and set
	 * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
	 * step.
	 */
	for (i-- ; i >= 0; i--)
		vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));

fail_put_bo:
	/* Release any reference to acquired objects. */
	for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
		drm_gem_object_put(&exec->bo[i]->base);

fail:
	kvfree(handles);
	kvfree(exec->bo);
	exec->bo = NULL;
	return ret;
}

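/* Copies the binner command list, shader records, and uniforms in from
 * userspace, validates them, and places the validated contents in a
 * freshly allocated BO that the binner will execute from.
 */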
static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	void *temp = NULL;
	void *bin;
	int ret = 0;
	uint32_t bin_offset = 0;
	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
					     16);
	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
	uint32_t exec_size = uniforms_offset + args->uniforms_size;
	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
					  args->shader_rec_count);
	struct vc4_bo *bo;

	if (shader_rec_offset < args->bin_cl_size ||
	    uniforms_offset < shader_rec_offset ||
	    exec_size < uniforms_offset ||
	    args->shader_rec_count >= (UINT_MAX /
					  sizeof(struct vc4_shader_state)) ||
	    temp_size < exec_size) {
		DRM_DEBUG("overflow in exec arguments\n");
		ret = -EINVAL;
		goto fail;
	}

	/* Allocate space where we'll store the copied-in user command lists
	 * and shader records.
	 *
	 * We don't just copy directly into the BOs because we need to
	 * read the contents back for validation, and access through
	 * bo->vaddr is likely uncached.
	 */
	temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
	if (!temp) {
		DRM_ERROR("Failed to allocate storage for copying "
			  "in bin/render CLs.\n");
		ret = -ENOMEM;
		goto fail;
	}
	bin = temp + bin_offset;
	exec->shader_rec_u = temp + shader_rec_offset;
	exec->uniforms_u = temp + uniforms_offset;
	exec->shader_state = temp + exec_size;
	exec->shader_state_size = args->shader_rec_count;

	if (copy_from_user(bin,
			   u64_to_user_ptr(args->bin_cl),
			   args->bin_cl_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->shader_rec_u,
			   u64_to_user_ptr(args->shader_rec),
			   args->shader_rec_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->uniforms_u,
			   u64_to_user_ptr(args->uniforms),
			   args->uniforms_size)) {
		ret = -EFAULT;
		goto fail;
	}

	bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate BO for binning\n");
		ret = PTR_ERR(bo);
		goto fail;
	}
	exec->exec_bo = &bo->base;

	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
		      &exec->unref_list);

	exec->ct0ca = exec->exec_bo->paddr + bin_offset;

	exec->bin_u = bin;

	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
	exec->shader_rec_size = args->shader_rec_size;

	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
	exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
	exec->uniforms_size = args->uniforms_size;

	ret = vc4_validate_bin_cl(dev,
				  exec->exec_bo->vaddr + bin_offset,
				  bin,
				  exec);
	if (ret)
		goto fail;

	ret = vc4_validate_shader_recs(dev, exec);
	if (ret)
		goto fail;

	if (exec->found_tile_binning_mode_config_packet) {
		ret = vc4_v3d_bin_bo_get(vc4, &exec->bin_bo_used);
		if (ret)
			goto fail;
	}

	/* Block waiting on any previous rendering into the CS's VBO,
	 * IB, or textures, so that pixels are actually written by the
	 * time we try to read them.
	 */
	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);

fail:
	kvfree(temp);
	return ret;
}

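/* Frees everything associated with a finished (or force-completed) job:
 * signals the fence if the IRQ handler didn't, drops the usecnt and GEM
 * reference on every BO, returns the bin slots, and releases the perfmon
 * and runtime-PM references.
 */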
static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	unsigned i;

	/* If we got force-completed because of GPU reset rather than
	 * through our IRQ handler, signal the fence now.
	 */
	if (exec->fence) {
		dma_fence_signal(exec->fence);
		dma_fence_put(exec->fence);
	}

	if (exec->bo) {
		for (i = 0; i < exec->bo_count; i++) {
			struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);

			vc4_bo_dec_usecnt(bo);
			drm_gem_object_put(&exec->bo[i]->base);
		}
		kvfree(exec->bo);
	}

	while (!list_empty(&exec->unref_list)) {
		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
						     struct vc4_bo, unref_head);
		list_del(&bo->unref_head);
		drm_gem_object_put(&bo->base.base);
	}

	/* Free up the allocation of any bin slots we used. */
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	vc4->bin_alloc_used &= ~exec->bin_slots;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Release the reference on the binner BO if needed. */
	if (exec->bin_bo_used)
		vc4_v3d_bin_bo_put(vc4);

	/* Release the reference we had on the perf monitor. */
	vc4_perfmon_put(exec->perfmon);

	vc4_v3d_pm_put(vc4);

	kfree(exec);
}

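/* Completes every job that the IRQ handler moved to job_done_list, then
 * fires any queued seqno callbacks whose seqno has now been reached.
 */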
void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
	unsigned long irqflags;
	struct vc4_seqno_cb *cb, *cb_temp;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	while (!list_empty(&vc4->job_done_list)) {
		struct vc4_exec_info *exec =
			list_first_entry(&vc4->job_done_list,
					 struct vc4_exec_info, head);
		list_del(&exec->head);

		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_complete_exec(&vc4->base, exec);
		spin_lock_irqsave(&vc4->job_lock, irqflags);
	}

	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
		if (cb->seqno <= vc4->finished_seqno) {
			list_del_init(&cb->work.entry);
			schedule_work(&cb->work);
		}
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}

static void vc4_seqno_cb_work(struct work_struct *work)
{
	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);

	cb->func(cb);
}

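/* Registers @cb to be called (from a workqueue) once @seqno has been
 * reached.  If it already has, the callback is scheduled immediately.
 */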
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb))
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;

	cb->func = func;
	INIT_WORK(&cb->work, vc4_seqno_cb_work);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (seqno > vc4->finished_seqno) {
		cb->seqno = seqno;
		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
	} else {
		schedule_work(&cb->work);
	}
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return 0;
}

/* Scheduled when any job has been completed, this walks the list of
 * jobs that had completed and unrefs their BOs and frees their exec
 * structs.
 */
static void
vc4_job_done_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, job_done_work);

	vc4_job_handle_completed(vc4);
}

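/* Shared helper for the wait ioctls.  On signal interruption, the
 * remaining time is written back through @timeout_ns so that a restarted
 * syscall doesn't wait longer than originally requested.
 */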
static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
				uint64_t seqno,
				uint64_t *timeout_ns)
{
	unsigned long start = jiffies;
	int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);

	if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
		uint64_t delta = jiffies_to_nsecs(jiffies - start);

		if (*timeout_ns >= delta)
			*timeout_ns -= delta;
	}

	return ret;
}

int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_vc4_wait_seqno *args = data;

	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
					       &args->timeout_ns);
}

int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_vc4_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_vc4_bo(gem_obj);

	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
					      &args->timeout_ns);

	drm_gem_object_put(gem_obj);
	return ret;
}

/**
 * vc4_submit_cl_ioctl() - Submits a job (frame) to the VC4.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct drm_vc4_submit_cl *args = data;
	struct drm_syncobj *out_sync = NULL;
	struct vc4_exec_info *exec;
	struct ww_acquire_ctx acquire_ctx;
	struct dma_fence *in_fence;
	int ret = 0;

	trace_vc4_submit_cl_ioctl(dev, args->bin_cl_size,
				  args->shader_rec_size,
				  args->bo_handle_count);

	if (!vc4->v3d) {
		DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n");
		return -ENODEV;
	}

	if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
			     VC4_SUBMIT_CL_FIXED_RCL_ORDER |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) {
		DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad2 != 0) {
		DRM_DEBUG("Invalid pad: 0x%08x\n", args->pad2);
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec) {
		DRM_ERROR("malloc failure on exec struct\n");
		return -ENOMEM;
	}

	ret = vc4_v3d_pm_get(vc4);
	if (ret) {
		kfree(exec);
		return ret;
	}

	exec->args = args;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = vc4_cl_lookup_bos(dev, file_priv, exec);
	if (ret)
		goto fail;

	if (args->perfmonid) {
		exec->perfmon = vc4_perfmon_find(vc4file,
						 args->perfmonid);
		if (!exec->perfmon) {
			ret = -ENOENT;
			goto fail;
		}
	}

	if (args->in_sync) {
		ret = drm_syncobj_find_fence(file_priv, args->in_sync,
					     0, 0, &in_fence);
		if (ret)
			goto fail;

		/* When the fence (or fence array) is exclusively from our
		 * context we can skip the wait since jobs are executed in
		 * order of their submission through this ioctl and this can
		 * only have fences from a prior job.
		 */
		if (!dma_fence_match_context(in_fence,
					     vc4->dma_fence_context)) {
			ret = dma_fence_wait(in_fence, true);
			if (ret) {
				dma_fence_put(in_fence);
				goto fail;
			}
		}

		dma_fence_put(in_fence);
	}

	if (exec->args->bin_cl_size != 0) {
		ret = vc4_get_bcl(dev, exec);
		if (ret)
			goto fail;
	} else {
		exec->ct0ca = 0;
		exec->ct0ea = 0;
	}

	ret = vc4_get_rcl(dev, exec);
	if (ret)
		goto fail;

	ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	if (args->out_sync) {
		out_sync = drm_syncobj_find(file_priv, args->out_sync);
		if (!out_sync) {
			ret = -EINVAL;
			goto fail;
		}

		/* We replace the fence in out_sync in vc4_queue_submit since
		 * the render job could execute immediately after that call.
		 * If it finishes before our ioctl processing resumes the
		 * render job fence could already have been freed.
		 */
	}

	/* Clear this out of the struct we'll be putting in the queue,
	 * since it's part of our stack.
	 */
	exec->args = NULL;

	ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync);

	/* The syncobj isn't part of the exec data and we need to free our
	 * reference even if job submission failed.
	 */
	if (out_sync)
		drm_syncobj_put(out_sync);

	if (ret)
		goto fail;

	/* Return the seqno for our job. */
	args->seqno = vc4->emit_seqno;

	return 0;

fail:
	vc4_complete_exec(&vc4->base, exec);

	return ret;
}

static void vc4_gem_destroy(struct drm_device *dev, void *unused);
int vc4_gem_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	vc4->dma_fence_context = dma_fence_context_alloc(1);

	INIT_LIST_HEAD(&vc4->bin_job_list);
	INIT_LIST_HEAD(&vc4->render_job_list);
	INIT_LIST_HEAD(&vc4->job_done_list);
	INIT_LIST_HEAD(&vc4->seqno_cb_list);
	spin_lock_init(&vc4->job_lock);

	INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
	timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);

	INIT_WORK(&vc4->job_done_work, vc4_job_done_work);

	mutex_init(&vc4->power_lock);

	INIT_LIST_HEAD(&vc4->purgeable.list);
	mutex_init(&vc4->purgeable.lock);

	return drmm_add_action_or_reset(dev, vc4_gem_destroy, NULL);
}

static void vc4_gem_destroy(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(vc4->emit_seqno != vc4->finished_seqno);

	/* V3D should already have disabled its interrupt and cleared
	 * the overflow allocation registers.  Now free the object.
	 */
	if (vc4->bin_bo) {
		drm_gem_object_put(&vc4->bin_bo->base.base);
		vc4->bin_bo = NULL;
	}

	if (vc4->hang_state)
		vc4_free_hang_state(dev, vc4->hang_state);
}

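/* Implements the VC4_GEM_MADVISE ioctl: lets userspace mark a BO as
 * purgeable (DONTNEED) or reclaim it (WILLNEED), and reports back whether
 * the backing storage was still retained.
 */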
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_vc4_gem_madvise *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	int ret;

	switch (args->madv) {
	case VC4_MADV_DONTNEED:
	case VC4_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	bo = to_vc4_bo(gem_obj);

	/* Only BOs exposed to userspace can be purged. */
	if (bo->madv == __VC4_MADV_NOTSUPP) {
		DRM_DEBUG("madvise not supported on this BO\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	/* Not sure it's safe to purge imported BOs. Let's just assume it's
	 * not until proven otherwise.
	 */
	if (gem_obj->import_attach) {
		DRM_DEBUG("madvise not supported on imported BOs\n");
		ret = -EINVAL;
		goto out_put_gem;
	}

	mutex_lock(&bo->madv_lock);

	if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
	    !refcount_read(&bo->usecnt)) {
		/* If the BO is about to be marked as purgeable, is not used
		 * and is not already purgeable or purged, add it to the
		 * purgeable list.
		 */
		vc4_bo_add_to_purgeable_pool(bo);
	} else if (args->madv == VC4_MADV_WILLNEED &&
		   bo->madv == VC4_MADV_DONTNEED &&
		   !refcount_read(&bo->usecnt)) {
		/* The BO has not been purged yet, just remove it from
		 * the purgeable list.
		 */
		vc4_bo_remove_from_purgeable_pool(bo);
	}

	/* Save the purged state. */
	args->retained = bo->madv != __VC4_MADV_PURGED;

	/* Update internal madv state only if the bo was not purged. */
	if (bo->madv != __VC4_MADV_PURGED)
		bo->madv = args->madv;

	mutex_unlock(&bo->madv_lock);

	ret = 0;

out_put_gem:
	drm_gem_object_put(gem_obj);

	return ret;
}
1396