/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>

#include "uapi/drm/vc4_drm.h"

/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this.
 */
enum vc4_kernel_bo_type {
	/* Any kernel allocation (gem_create_object hook) before it
	 * gets another type set.
	 */
	VC4_BO_TYPE_KERNEL,
	VC4_BO_TYPE_V3D,
	VC4_BO_TYPE_V3D_SHADER,
	VC4_BO_TYPE_DUMB,
	VC4_BO_TYPE_BIN,
	VC4_BO_TYPE_RCL,
	VC4_BO_TYPE_BCL,
	VC4_BO_TYPE_KERNEL_CACHE,
	VC4_BO_TYPE_COUNT
};
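
/* The human-readable names live in vc4_bo.c. A sketch of that table (entry
 * strings approximate; check bo_type_names[] there for the authoritative
 * list, and keep it index-matched with the enum above):
 *
 *	static const char * const bo_type_names[] = {
 *		"kernel",
 *		"V3D",
 *		"V3D shader",
 *		"dumb",
 *		"binner",
 *		"RCL",
 *		"BCL",
 *		"kernel BO cache",
 *	};
 */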

/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon-related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct vc4_perfmon {
	/* Tracks the number of users of the perfmon; when this counter
	 * reaches zero the perfmon is destroyed.
	 */
	refcount_t refcnt;

	/* Number of counters activated in this perfmon instance
	 * (should not exceed DRM_VC4_MAX_PERF_COUNTERS).
	 */
	u8 ncounters;

	/* Events counted by the HW perf counters. */
	u8 events[DRM_VC4_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the HW
	 * perf counter values every time the perfmon is attached to a GPU job.
	 * This way, perfmon users don't have to retrieve the results after
	 * each job if they want to track events covering several submissions.
	 * Note that counter values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 */
	u64 counters[0];
};
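
/* Lifetime sketch (illustrative, not copied from vc4_gem.c): a submit looks
 * the perfmon up by ID, which takes a reference that is dropped when the job
 * completes, so a racing destroy ioctl cannot free it under the GPU:
 *
 *	exec->perfmon = vc4_perfmon_find(vc4file, args->perfmonid);
 *	if (!exec->perfmon)
 *		return -ENOENT;
 *	...
 *	vc4_perfmon_put(exec->perfmon);	 // at job completion
 *
 * (args->perfmonid is the submit_cl field name assumed here.)
 */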

struct vc4_dev {
	struct drm_device *dev;

	struct vc4_hdmi *hdmi;
	struct vc4_hvs *hvs;
	struct vc4_v3d *v3d;
	struct vc4_dpi *dpi;
	struct vc4_dsi *dsi1;
	struct vc4_vec *vec;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache.  Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;

	u32 num_labels;
	struct vc4_label {
		const char *name;
		u32 num_allocated;
		u32 size_allocated;
	} *bo_labels;

	/* Protects bo_cache and bo_labels. */
	struct mutex bo_lock;

	/* Purgeable BO pool. All BOs in this pool can have their memory
	 * reclaimed if the driver is unable to allocate new BOs. We also
	 * keep stats related to the purge mechanism here.
	 */
	struct {
		struct list_head list;
		unsigned int num;
		size_t size;
		unsigned int purged_num;
		size_t purged_size;
		struct mutex lock;
	} purgeable;

	uint64_t dma_fence_context;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;

	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner.  The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering.  The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;
	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;

	/* Used to track the active perfmon if any. Access to this field is
	 * protected by job_lock.
	 */
	struct vc4_perfmon *active_perfmon;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations.  This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;

	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;

	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used.
	 */
	uint32_t bin_alloc_used;

	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;

	struct work_struct overflow_mem_work;

	int power_refcount;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct semaphore async_modeset;
};

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return (struct vc4_dev *)dev->dev_private;
}
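
/* Example use of the upcast helper (illustrative): most entry points grab
 * the vc4_dev first, then take whichever lock guards the state they touch,
 * e.g. bo_lock for the BO cache:
 *
 *	struct vc4_dev *vc4 = to_vc4_dev(dev);
 *
 *	mutex_lock(&vc4->bo_lock);
 *	... walk vc4->bo_cache.time_list ...
 *	mutex_unlock(&vc4->bo_lock);
 */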

struct vc4_bo {
	struct drm_gem_cma_object base;

	/* seqno of the last job to render using this BO. */
	uint64_t seqno;

	/* seqno of the last job to use the RCL to write to this BO.
	 *
	 * Note that this doesn't include binner overflow memory
	 * writes.
	 */
	uint64_t write_seqno;

	bool t_format;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;

	/* Normally (resv == &_resv), except for imported BOs. */
	struct reservation_object *resv;
	struct reservation_object _resv;

	/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
	 * for user-allocated labels.
	 */
	int label;

	/* Count the number of active users. This is needed to determine
	 * whether we can move the BO to the purgeable list or not (when the BO
	 * is used by the GPU or the display engine we can't purge it).
	 */
	refcount_t usecnt;

	/* Store purgeable/purged state here. */
	u32 madv;
	struct mutex madv_lock;
};

static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return (struct vc4_bo *)bo;
}
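
/* Pinning sketch (an assumption based on the usecnt comment above, not a
 * quote from the driver): code that hands a BO to the GPU or the display
 * engine bumps usecnt first so the BO can't be purged while in flight:
 *
 *	ret = vc4_bo_inc_usecnt(bo);	// fails if the BO was already purged
 *	if (ret)
 *		return ret;
 *	...
 *	vc4_bo_dec_usecnt(bo);		// when the BO is idle again
 */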

struct vc4_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
	uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
	return (struct vc4_fence *)fence;
}

struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};

struct vc4_v3d {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	struct clk *clk;
};

struct vc4_hvs {
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;

	/* Memory manager for CRTCs to allocate space in the display
	 * list.  Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;
};

struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return (struct vc4_plane *)plane;
}

enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}

#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
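
/* These macros expect a variable named vc4 in scope. Usage sketch (register
 * names per vc4_regs.h):
 *
 *	struct vc4_dev *vc4 = to_vc4_dev(dev);
 *	u32 ident0 = V3D_READ(V3D_IDENT0);
 *
 *	HVS_WRITE(SCALER_DISPCTRL,
 *		  HVS_READ(SCALER_DISPCTRL) | SCALER_DISPCTRL_ENABLE);
 */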

struct vc4_exec_info {
	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Latest write_seqno of any BO that binning depends on. */
	uint64_t bin_dep_seqno;

	struct dma_fence *fence;

	/* Last addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_cma_object **bo;
	uint32_t bo_count;

	/* List of BOs that are being written by the RCL.  Other than
	 * the binner temporary storage, this is all the BOs written
	 * by the job.
	 */
	struct drm_gem_cma_object *rcl_write_bo[4];
	uint32_t rcl_write_bo_count;

	/* Pointers for our position in vc4->job_list */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_cma_object *exec_bo;

	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at.  It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	/* Physical address of the start of the tile alloc array
	 * (where each tile's binned CL will start)
	 */
	uint32_t tile_alloc_offset;
	/* Bitmask of which binner slots are freed when this job completes. */
	uint32_t bin_slots;

	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;

	/* Pointers to the shader recs.  The paddr gets incremented as CL
	 * packets are relocated in validate_gl_shader_state, and the vaddrs
	 * (u and v) get incremented and size decremented as the shader recs
	 * themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data.  These pointers are incremented, and
	 * size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;

	/* Pointer to a performance monitor object if the user requested it,
	 * NULL otherwise.
	 */
	struct vc4_perfmon *perfmon;
};

/* Per-open file private data. Any driver-specific resource that has to be
 * released when the DRM file is closed should be placed here.
 */
struct vc4_file {
	struct {
		struct idr idr;
		struct mutex lock;
	} perfmon;
};
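
/* Lookup sketch for the perfmon idr (the real implementation is
 * vc4_perfmon_find() in vc4_perfmon.c; this only illustrates the locking):
 *
 *	mutex_lock(&vc4file->perfmon.lock);
 *	perfmon = idr_find(&vc4file->perfmon.idr, id);
 *	if (perfmon)
 *		vc4_perfmon_get(perfmon);
 *	mutex_unlock(&vc4file->perfmon.lock);
 */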

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->bin_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->render_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_last_entry(&vc4->render_job_list,
			       struct vc4_exec_info, head);
}
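
/* The job lists are shared with the IRQ handler, so callers peek at them
 * under job_lock; e.g. (illustrative):
 *
 *	unsigned long irqflags;
 *
 *	spin_lock_irqsave(&vc4->job_lock, irqflags);
 *	exec = vc4_first_bin_job(vc4);
 *	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 */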

/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;

	uint32_t num_uniform_addr_offsets;
	uint32_t *uniform_addr_offsets;

	bool is_threaded;
};

/**
 * _wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts. Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			if (!(COND))					\
				ret__ = -ETIMEDOUT;			\
			break;						\
		}							\
		if (W && drm_can_sleep()) {				\
			msleep(W);					\
		} else {						\
			cpu_relax();					\
		}							\
	}								\
	ret__;								\
})

#define wait_for(COND, MS) _wait_for(COND, MS, 1)
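
/* Usage sketch (status_reg and STATUS_DONE are placeholders, not real driver
 * symbols): poll a condition for up to 100 ms, sleeping 1 ms between checks
 * when sleeping is allowed:
 *
 *	int ret = wait_for(readl(status_reg) & STATUS_DONE, 100);
 *	if (ret == -ETIMEDOUT)
 *		DRM_ERROR("timed out waiting for completion\n");
 */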

/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache, enum vc4_kernel_bo_type type);
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
struct dma_buf *vc4_prime_export(struct drm_device *dev,
				 struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);
void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
			     bool in_vblank_irq, int *vpos, int *hpos,
			     ktime_t *stime, ktime_t *etime,
			     const struct drm_display_mode *mode);

/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;
int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;
int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_gem.c */
void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;
int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
int vc4_irq_postinstall(struct drm_device *dev);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_cma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);

/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
void vc4_perfmon_put(struct vc4_perfmon *perfmon);
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
		      bool capture);
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
void vc4_perfmon_open_file(struct vc4_file *vc4file);
void vc4_perfmon_close_file(struct vc4_file *vc4file);
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);
717