/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/mm_types.h>
#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_syncobj.h>

#include "uapi/drm/vc4_drm.h"

/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this.
 */
enum vc4_kernel_bo_type {
	/* Any kernel allocation (gem_create_object hook) before it
	 * gets another type set.
	 */
	VC4_BO_TYPE_KERNEL,
	VC4_BO_TYPE_V3D,
	VC4_BO_TYPE_V3D_SHADER,
	VC4_BO_TYPE_DUMB,
	VC4_BO_TYPE_BIN,
	VC4_BO_TYPE_RCL,
	VC4_BO_TYPE_BCL,
	VC4_BO_TYPE_KERNEL_CACHE,
	VC4_BO_TYPE_COUNT
};

/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct vc4_perfmon {
	/* Tracks the number of users of the perfmon, when this counter reaches
	 * zero the perfmon is destroyed.
	 */
	refcount_t refcnt;

	/* Number of counters activated in this perfmon instance
	 * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
	 */
	u8 ncounters;

	/* Events counted by the HW perf counters. */
	u8 events[DRM_VC4_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the HW
	 * perf counter values every time the perfmon is attached to a GPU job.
	 * This way, perfmon users don't have to retrieve the results after
	 * each job if they want to track events covering several submissions.
	 * Note that counter values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 */
	u64 counters[0];
};
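
/* A minimal allocation sketch for the flexible counters[] array above
 * (illustrative only, not part of the driver; assumes <linux/slab.h> is
 * visible and that the caller has already validated ncounters against
 * DRM_VC4_MAX_PERF_COUNTERS):
 */
static inline struct vc4_perfmon *vc4_example_perfmon_alloc(u8 ncounters)
{
	struct vc4_perfmon *perfmon;

	/* One u64 of storage per activated counter follows the struct. */
	perfmon = kzalloc(sizeof(*perfmon) + ncounters * sizeof(u64),
			  GFP_KERNEL);
	if (!perfmon)
		return NULL;

	perfmon->ncounters = ncounters;
	refcount_set(&perfmon->refcnt, 1);
	return perfmon;
}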

struct vc4_dev {
	struct drm_device *dev;

	struct vc4_hdmi *hdmi;
	struct vc4_hvs *hvs;
	struct vc4_v3d *v3d;
	struct vc4_dpi *dpi;
	struct vc4_dsi *dsi1;
	struct vc4_vec *vec;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache. Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;

	u32 num_labels;
	struct vc4_label {
		const char *name;
		u32 num_allocated;
		u32 size_allocated;
	} *bo_labels;

	/* Protects bo_cache and bo_labels. */
	struct mutex bo_lock;

	/* Purgeable BO pool. All BOs in this pool can have their memory
	 * reclaimed if the driver is unable to allocate new BOs. We also
	 * keep stats related to the purge mechanism here.
	 */
	struct {
		struct list_head list;
		unsigned int num;
		size_t size;
		unsigned int purged_num;
		size_t purged_size;
		struct mutex lock;
	} purgeable;

	uint64_t dma_fence_context;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;

	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner. The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering. The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;
	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;

	/* Used to track the active perfmon if any. Access to this field is
	 * protected by job_lock.
	 */
	struct vc4_perfmon *active_perfmon;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations. This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;

	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;

	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used.
	 */
	uint32_t bin_alloc_used;

	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;

	struct work_struct overflow_mem_work;

	int power_refcount;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct semaphore async_modeset;

	struct drm_modeset_lock ctm_state_lock;
	struct drm_private_obj ctm_manager;
};

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return (struct vc4_dev *)dev->dev_private;
}
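
/* Illustrative sketch of the bin_alloc_used bitmask convention (bit i set
 * means chunk i of bin_bo is in use). This is a hypothetical helper, not
 * the driver's interface; vc4_v3d_get_bin_slot() is the real one and also
 * handles waiting for slots to free up.
 */
static inline int vc4_example_claim_bin_slot(struct vc4_dev *vc4)
{
	/* Assumes the caller holds whatever lock protects bin_alloc_used. */
	int slot = ffs(~vc4->bin_alloc_used);

	if (!slot)
		return -ENOMEM;	/* all chunks in use */

	vc4->bin_alloc_used |= BIT(slot - 1);
	return slot - 1;
}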

struct vc4_bo {
	struct drm_gem_cma_object base;

	/* seqno of the last job to render using this BO. */
	uint64_t seqno;

	/* seqno of the last job to use the RCL to write to this BO.
	 *
	 * Note that this doesn't include binner overflow memory
	 * writes.
	 */
	uint64_t write_seqno;

	bool t_format;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;

	/* Normally (resv == &_resv) except for imported BOs. */
	struct reservation_object *resv;
	struct reservation_object _resv;

	/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
	 * for user-allocated labels.
	 */
	int label;

	/* Count the number of active users. This is needed to determine
	 * whether we can move the BO to the purgeable list or not (when the BO
	 * is used by the GPU or the display engine we can't purge it).
	 */
	refcount_t usecnt;

	/* Store purgeable/purged state here. */
	u32 madv;
	struct mutex madv_lock;
};

static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return (struct vc4_bo *)bo;
}

struct vc4_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
	uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
	return (struct vc4_fence *)fence;
}
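
/* A sketch of the signaled() test the comment above refers to (what the
 * vc4_fence_ops implementation in vc4_fence.c is expected to do; the
 * helper name here is hypothetical): a fence has signaled once the GPU's
 * last completed seqno has caught up with the fence's own.
 */
static inline bool vc4_example_fence_signaled(struct dma_fence *fence)
{
	struct vc4_fence *f = to_vc4_fence(fence);
	struct vc4_dev *vc4 = to_vc4_dev(f->dev);

	/* finished_seqno is normally read under vc4->job_lock. */
	return vc4->finished_seqno >= f->seqno;
}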

struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};

struct vc4_v3d {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	struct clk *clk;
};

struct vc4_hvs {
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;

	/* Memory manager for CRTCs to allocate space in the display
	 * list. Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;
};

struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return (struct vc4_plane *)plane;
}

enum vc4_scaling_mode {
	VC4_SCALING_NONE,
	VC4_SCALING_TPZ,
	VC4_SCALING_PPF,
};

struct vc4_plane_state {
	struct drm_plane_state base;
	/* System memory copy of the display list for this element, computed
	 * at atomic_check time.
	 */
	u32 *dlist;
	u32 dlist_size; /* Number of dwords allocated for the display list */
	u32 dlist_count; /* Number of used dwords in the display list. */

	/* Offset in the dlist to various words, for pageflip or
	 * cursor updates.
	 */
	u32 pos0_offset;
	u32 pos2_offset;
	u32 ptr0_offset;

	/* Offset where the plane's dlist was last stored in the
	 * hardware at vc4_crtc_atomic_flush() time.
	 */
	u32 __iomem *hw_dlist;

	/* Clipped coordinates of the plane on the display. */
	int crtc_x, crtc_y, crtc_w, crtc_h;
	/* Clipped area being scanned from in the FB. */
	u32 src_x, src_y;

	u32 src_w[2], src_h[2];

	/* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
	enum vc4_scaling_mode x_scaling[2], y_scaling[2];
	bool is_unity;
	bool is_yuv;

	/* Offset to start scanning out from the start of the plane's
	 * BO.
	 */
	u32 offsets[3];

	/* Our allocation in LBM for temporary storage during scaling. */
	struct drm_mm_node lbm;

	/* Set when the plane has per-pixel alpha content or does not cover
	 * the entire screen. This is a hint to the CRTC that it might need
	 * to enable background color fill.
	 */
	bool needs_bg_fill;
};

static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
	return (struct vc4_plane_state *)state;
}

enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}

struct vc4_crtc_data {
	/* Which channel of the HVS this pixelvalve sources from. */
	int hvs_channel;

	enum vc4_encoder_type encoder_types[4];
};

struct vc4_crtc {
	struct drm_crtc base;
	const struct vc4_crtc_data *data;
	void __iomem *regs;

	/* Timestamp at start of vblank irq - unaffected by lock delays. */
	ktime_t t_vblank;

	/* Which HVS channel we're using for our CRTC. */
	int channel;

	u8 lut_r[256];
	u8 lut_g[256];
	u8 lut_b[256];
	/* Size in pixels of the COB memory allocated to this CRTC. */
	u32 cob_size;

	struct drm_pending_vblank_event *event;
};

static inline struct vc4_crtc *
to_vc4_crtc(struct drm_crtc *crtc)
{
	return (struct vc4_crtc *)crtc;
}

#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
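
/* Note that these helpers expand against a local variable named vc4 (a
 * struct vc4_dev *), which must be in scope at the call site:
 *
 *	struct vc4_dev *vc4 = to_vc4_dev(dev);
 *	uint32_t ident0 = V3D_READ(V3D_IDENT0);
 *
 * (V3D_IDENT0 is assumed to come from vc4_regs.h.)
 */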

struct vc4_exec_info {
	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Latest write_seqno of any BO that binning depends on. */
	uint64_t bin_dep_seqno;

	struct dma_fence *fence;

	/* Last current addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_cma_object **bo;
	uint32_t bo_count;

	/* List of BOs that are being written by the RCL. Other than
	 * the binner temporary storage, this is all the BOs written
	 * by the job.
	 */
	struct drm_gem_cma_object *rcl_write_bo[4];
	uint32_t rcl_write_bo_count;

	/* Pointers for our position in vc4->job_list */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_cma_object *exec_bo;

	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at. It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	/* Physical address of the start of the tile alloc array
	 * (where each tile's binned CL will start)
	 */
	uint32_t tile_alloc_offset;
	/* Bitmask of which binner slots are freed when this job completes. */
	uint32_t bin_slots;

	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;

	/* Pointers to the shader recs. The paddr gets incremented as CL
	 * packets are relocated in validate_gl_shader_state, and the vaddrs
	 * (u and v) get incremented and size decremented as the shader recs
	 * themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data. These pointers are incremented, and
	 * size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;

	/* Pointer to a performance monitor object if the user requested it,
	 * NULL otherwise.
	 */
	struct vc4_perfmon *perfmon;
};

/* Per-open file private data. Any driver-specific resource that has to be
 * released when the DRM file is closed should be placed here.
 */
struct vc4_file {
	struct {
		struct idr idr;
		struct mutex lock;
	} perfmon;
};

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->bin_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->render_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_last_entry(&vc4->render_job_list,
			       struct vc4_exec_info, head);
}

/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};
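
/* Illustrative sketch of the ~0 "unused" convention described above
 * (hypothetical helper, not part of the validator):
 */
static inline bool
vc4_example_sample_param_provided(const struct vc4_texture_sample_info *sample,
				  int i)
{
	/* Offsets of parameters that were never set up are stored as ~0. */
	return sample->p_offset[i] != ~0;
}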

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;

	uint32_t num_uniform_addr_offsets;
	uint32_t *uniform_addr_offsets;

	bool is_threaded;
};

/**
 * _wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts. Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			if (!(COND))					\
				ret__ = -ETIMEDOUT;			\
			break;						\
		}							\
		if (W && drm_can_sleep()) {				\
			msleep(W);					\
		} else {						\
			cpu_relax();					\
		}							\
	}								\
	ret__;								\
})

#define wait_for(COND, MS) _wait_for(COND, MS, 1)
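
/* Example usage (a sketch; V3D_IDENT0 and V3D_EXPECTED_IDENT0 are assumed
 * to come from vc4_regs.h):
 *
 *	if (wait_for(V3D_READ(V3D_IDENT0) == V3D_EXPECTED_IDENT0, 100))
 *		DRM_ERROR("timed out waiting for V3D\n");
 *
 * MS is the total timeout in milliseconds; _wait_for()'s W argument is the
 * per-iteration msleep() in milliseconds, with 0 busy-waiting via
 * cpu_relax() instead.
 */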

/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache, enum vc4_kernel_bo_type type);
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
struct dma_buf *vc4_prime_export(struct drm_device *dev,
				 struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
vm_fault_t vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);
void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
			     bool in_vblank_irq, int *vpos, int *hpos,
			     ktime_t *stime, ktime_t *etime,
			     const struct drm_display_mode *mode);

/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;
int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;
int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_gem.c */
void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;
int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
int vc4_irq_postinstall(struct drm_device *dev);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);
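
/* Rough ordering of the validation entry points during a submit, as driven
 * from vc4_gem.c (a simplified sketch, not a literal call sequence):
 *
 *	vc4_validate_bin_cl()      - copy/validate the binner CL into exec_bo
 *	vc4_validate_shader_recs() - relocate/validate shader records
 *	vc4_get_rcl()              - generate and validate the render CL
 */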

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_cma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);

/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
void vc4_perfmon_put(struct vc4_perfmon *perfmon);
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
		      bool capture);
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
void vc4_perfmon_open_file(struct vc4_file *vc4file);
void vc4_perfmon_close_file(struct vc4_file *vc4file);
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);
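
/* Perfmon lifetime sketch, tying the declarations above to the refcnt
 * documented in struct vc4_perfmon: creation via vc4_perfmon_create_ioctl()
 * leaves one reference held by the file's perfmon IDR; attaching the
 * perfmon to a submit_cl takes another via vc4_perfmon_get();
 * vc4_perfmon_put() drops a reference and destroys the object once the
 * count reaches zero.
 */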