/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include "drmP.h"
#include "drm_gem_cma_helper.h"

/* Per-device driver state for the VC4 DRM driver.  Lives in
 * drm_device->dev_private (see to_vc4_dev()).
 */
struct vc4_dev {
	struct drm_device *dev;

	struct vc4_hdmi *hdmi;
	struct vc4_hvs *hvs;
	struct vc4_crtc *crtc[3];
	struct vc4_v3d *v3d;
	struct vc4_dpi *dpi;

	struct drm_fbdev_cma *fbdev;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache.  Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;

	/* Statistics for debugfs (see vc4_bo_stats_debugfs()). */
	struct vc4_bo_stats {
		u32 num_allocated;
		u32 size_allocated;
		u32 num_cached;
		u32 size_cached;
	} bo_stats;

	/* Protects bo_cache and the BO stats. */
	struct mutex bo_lock;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;

	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner.  The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering.  The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;
	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The binner overflow memory that's currently set up in
	 * BPOA/BPOS registers.  When overflow occurs and a new one is
	 * allocated, the previous one will be moved to
	 * vc4->current_exec's free list.
	 */
	struct vc4_bo *overflow_mem;
	struct work_struct overflow_mem_work;

	int power_refcount;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	/* GPU hang detection: timer that periodically checks job
	 * progress, and the work item that performs the reset.
	 */
	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct semaphore async_modeset;
};

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return (struct vc4_dev *)dev->dev_private;
}

/* VC4 buffer object: a CMA GEM object plus the driver's cache and
 * validation bookkeeping.
 */
struct vc4_bo {
	struct drm_gem_cma_object base;

	/* seqno of the last job to render to this BO. */
	uint64_t seqno;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;
};

static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return (struct vc4_bo *)bo;
}

/* Callback fired from a workqueue once the given seqno has passed
 * (see vc4_queue_seqno_cb()).
 */
struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};

struct vc4_v3d {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
};

struct vc4_hvs {
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;

	/* Memory manager for CRTCs to allocate space in the display
	 * list.  Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;
};

struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return (struct vc4_plane *)plane;
}

enum vc4_encoder_type {
	VC4_ENCODER_TYPE_HDMI,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}

/* Register access helpers.  These expect a local `vc4` (struct vc4_dev *)
 * to be in scope at the call site.
 */
#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)

/* State for one submitted bin/render job, from ioctl submission
 * through validation, execution, and cleanup.
 */
struct vc4_exec_info {
	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Last current addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_cma_object **bo;
	uint32_t bo_count;

	/* List entry for this job's position in vc4->bin_job_list,
	 * render_job_list, or job_done_list.
	 */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_cma_object *exec_bo;

	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at.  It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	/* Flags recording which required bin CL packets the validator
	 * has encountered.
	 */
	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	struct drm_gem_cma_object *tile_bo;
	uint32_t tile_alloc_offset;

	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;

	/* Pointers to the shader recs.  These paddr gets incremented as CL
	 * packets are relocated in validate_gl_shader_state, and the vaddrs
	 * (u and v) get incremented and size decremented as the shader recs
	 * themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data.  These pointers are incremented, and
	 * size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;
};

/* Returns the binner job currently programmed into ct0ca, or NULL if
 * the bin queue is empty.  Caller is expected to hold job_lock.
 */
static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->bin_job_list))
		return NULL;
	return list_first_entry(&vc4->bin_job_list, struct vc4_exec_info, head);
}

/* Returns the render job currently programmed into ct1ca, or NULL if
 * the render queue is empty.  Caller is expected to hold job_lock.
 */
static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_first_entry(&vc4->render_job_list,
				struct vc4_exec_info, head);
}

/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;
};

/**
 * _wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts.  Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			if (!(COND))					\
				ret__ = -ETIMEDOUT;			\
			break;						\
		}							\
		if (W && drm_can_sleep())  {				\
			msleep(W);					\
		} else {						\
			cpu_relax();					\
		}							\
	}								\
	ret__;								\
})

#define wait_for(COND, MS) _wait_for(COND, MS, 1)

/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache);
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
struct dma_buf *vc4_prime_export(struct drm_device *dev,
				 struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
void *vc4_prime_vmap(struct drm_gem_object *obj);
void vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);

/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);
void vc4_debugfs_cleanup(struct drm_minor *minor);

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;
int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_gem.c */
void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
int vc4_irq_postinstall(struct drm_device *dev);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_cma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);