/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/mm_types.h>
#include <drm/drmP.h>
#include <drm/drm_util.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_syncobj.h>

#include "uapi/drm/vc4_drm.h"

/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this.
 */
enum vc4_kernel_bo_type {
	/* Any kernel allocation (gem_create_object hook) before it
	 * gets another type set.
	 */
	VC4_BO_TYPE_KERNEL,
	VC4_BO_TYPE_V3D,
	VC4_BO_TYPE_V3D_SHADER,
	VC4_BO_TYPE_DUMB,
	VC4_BO_TYPE_BIN,
	VC4_BO_TYPE_RCL,
	VC4_BO_TYPE_BCL,
	VC4_BO_TYPE_KERNEL_CACHE,
	VC4_BO_TYPE_COUNT
};

/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon-related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct vc4_perfmon {
	/* Tracks the number of users of the perfmon; when this counter
	 * reaches zero the perfmon is destroyed.
	 */
	refcount_t refcnt;

	/* Number of counters activated in this perfmon instance
	 * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
	 */
	u8 ncounters;

	/* Events counted by the HW perf counters. */
	u8 events[DRM_VC4_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the HW
	 * perf counter values every time the perfmon is attached to a GPU job.
	 * This way, perfmon users don't have to retrieve the results after
	 * each job if they want to track events covering several submissions.
	 * Note that counter values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 */
	u64 counters[0];
};
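/* Illustrative sketch (not driver code): anything that keeps a perfmon
 * pointer across a job takes a reference first and drops it when done;
 * the object is freed when the last reference goes away.
 * vc4_perfmon_get()/vc4_perfmon_put() are declared at the bottom of this
 * header.
 *
 *	vc4_perfmon_get(perfmon);	// refcnt++, perfmon stays alive
 *	...				// perfmon attached to a submit_cl job
 *	vc4_perfmon_put(perfmon);	// refcnt--, destroyed at zero
 */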
struct vc4_dev {
	struct drm_device *dev;

	struct vc4_hdmi *hdmi;
	struct vc4_hvs *hvs;
	struct vc4_v3d *v3d;
	struct vc4_dpi *dpi;
	struct vc4_dsi *dsi1;
	struct vc4_vec *vec;
	struct vc4_txp *txp;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache. Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;

	u32 num_labels;
	struct vc4_label {
		const char *name;
		u32 num_allocated;
		u32 size_allocated;
	} *bo_labels;

	/* Protects bo_cache and bo_labels. */
	struct mutex bo_lock;

	/* Purgeable BO pool. All BOs in this pool can have their memory
	 * reclaimed if the driver is unable to allocate new BOs. We also
	 * keep stats related to the purge mechanism here.
	 */
	struct {
		struct list_head list;
		unsigned int num;
		size_t size;
		unsigned int purged_num;
		size_t purged_size;
		struct mutex lock;
	} purgeable;

	uint64_t dma_fence_context;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;

	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner. The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering. The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;
	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;

	/* Used to track the active perfmon if any. Access to this field is
	 * protected by job_lock.
	 */
	struct vc4_perfmon *active_perfmon;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations. This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;

	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;

	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used.
	 */
	uint32_t bin_alloc_used;

	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;

	/* Incremented when an underrun error happened after an atomic commit.
	 * This is particularly useful to detect when a specific modeset is too
	 * demanding in terms of memory or HVS bandwidth, which is hard to
	 * guess at atomic check time.
	 */
	atomic_t underrun;

	struct work_struct overflow_mem_work;

	int power_refcount;

	/* Set to true when the load tracker is active. */
	bool load_tracker_enabled;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct semaphore async_modeset;

	struct drm_modeset_lock ctm_state_lock;
	struct drm_private_obj ctm_manager;
	struct drm_private_obj load_tracker;

	/* List of vc4_debugfs_info_entry for adding to debugfs once
	 * the minor is available (after drm_dev_register()).
	 */
	struct list_head debugfs_list;
};

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return (struct vc4_dev *)dev->dev_private;
}
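/* Illustrative sketch (an assumption, not driver code): a job identified
 * by its seqno is done once finished_seqno has caught up with it, so a
 * completion test reduces to a sequence-number comparison, performed
 * under job_lock since that lock guards seqno accesses:
 *
 *	static bool seqno_completed(struct vc4_dev *vc4, uint64_t seqno)
 *	{
 *		return vc4->finished_seqno >= seqno;
 *	}
 */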
struct vc4_bo {
	struct drm_gem_cma_object base;

	/* seqno of the last job to render using this BO. */
	uint64_t seqno;

	/* seqno of the last job to use the RCL to write to this BO.
	 *
	 * Note that this doesn't include binner overflow memory
	 * writes.
	 */
	uint64_t write_seqno;

	bool t_format;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;

	/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
	 * for user-allocated labels.
	 */
	int label;

	/* Count the number of active users. This is needed to determine
	 * whether we can move the BO to the purgeable list or not (when the BO
	 * is used by the GPU or the display engine we can't purge it).
	 */
	refcount_t usecnt;

	/* Store purgeable/purged state here. */
	u32 madv;
	struct mutex madv_lock;
};

static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return (struct vc4_bo *)bo;
}
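/* Illustrative sketch (an assumption, not driver code): usecnt gates
 * purgeability. While the GPU or the display engine holds a use count
 * the BO can't be purged; once the count drops back to zero, a BO the
 * user marked as not needed (via madv) can return to the purgeable pool.
 * The helpers are declared later in this header:
 *
 *	vc4_bo_inc_usecnt(bo);	// pin: BO temporarily unpurgeable
 *	...			// BO in use by the HW
 *	vc4_bo_dec_usecnt(bo);	// unpin: may become purgeable again
 */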
struct vc4_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
	uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
	return (struct vc4_fence *)fence;
}

struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};

struct vc4_v3d {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	struct clk *clk;
	struct debugfs_regset32 regset;
};

struct vc4_hvs {
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;

	/* Memory manager for CRTCs to allocate space in the display
	 * list. Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;
	struct debugfs_regset32 regset;
};

struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return (struct vc4_plane *)plane;
}

enum vc4_scaling_mode {
	VC4_SCALING_NONE,
	VC4_SCALING_TPZ,
	VC4_SCALING_PPF,
};

struct vc4_plane_state {
	struct drm_plane_state base;
	/* System memory copy of the display list for this element, computed
	 * at atomic_check time.
	 */
	u32 *dlist;
	u32 dlist_size; /* Number of dwords allocated for the display list */
	u32 dlist_count; /* Number of used dwords in the display list. */

	/* Offset in the dlist to various words, for pageflip or
	 * cursor updates.
	 */
	u32 pos0_offset;
	u32 pos2_offset;
	u32 ptr0_offset;
	u32 lbm_offset;

	/* Offset where the plane's dlist was last stored in the
	 * hardware at vc4_crtc_atomic_flush() time.
	 */
	u32 __iomem *hw_dlist;

	/* Clipped coordinates of the plane on the display. */
	int crtc_x, crtc_y, crtc_w, crtc_h;
	/* Clipped area being scanned from in the FB. */
	u32 src_x, src_y;

	u32 src_w[2], src_h[2];

	/* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
	enum vc4_scaling_mode x_scaling[2], y_scaling[2];
	bool is_unity;
	bool is_yuv;

	/* Offset to start scanning out from the start of the plane's
	 * BO.
	 */
	u32 offsets[3];

	/* Our allocation in LBM for temporary storage during scaling. */
	struct drm_mm_node lbm;

	/* Set when the plane has per-pixel alpha content or does not cover
	 * the entire screen. This is a hint to the CRTC that it might need
	 * to enable background color fill.
	 */
	bool needs_bg_fill;

	/* Mark the dlist as initialized. Useful to avoid initializing it twice
	 * when async update is not possible.
	 */
	bool dlist_initialized;

	/* Load of this plane on the HVS block. The load is expressed in HVS
	 * cycles/sec.
	 */
	u64 hvs_load;

	/* Memory bandwidth needed for this plane. This is expressed in
	 * bytes/sec.
	 */
	u64 membus_load;
};

static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
	return (struct vc4_plane_state *)state;
}
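/* Illustrative sketch (an assumption about the selection policy, not the
 * driver's exact thresholds): no scaler is needed when source and
 * destination sizes match; otherwise one of the two HVS filter types is
 * picked per axis, with TPZ (trapezoidal) suited to downscaling and PPF
 * (polyphase) to upscaling:
 *
 *	static enum vc4_scaling_mode get_scaling_mode(u32 src, u32 dst)
 *	{
 *		if (dst == src)
 *			return VC4_SCALING_NONE;
 *		return dst < src ? VC4_SCALING_TPZ : VC4_SCALING_PPF;
 *	}
 */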
enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}

struct vc4_crtc_data {
	/* Which channel of the HVS this pixelvalve sources from. */
	int hvs_channel;

	enum vc4_encoder_type encoder_types[4];
	const char *debugfs_name;
};

struct vc4_crtc {
	struct drm_crtc base;
	struct platform_device *pdev;
	const struct vc4_crtc_data *data;
	void __iomem *regs;

	/* Timestamp at start of vblank irq - unaffected by lock delays. */
	ktime_t t_vblank;

	/* Which HVS channel we're using for our CRTC. */
	int channel;

	u8 lut_r[256];
	u8 lut_g[256];
	u8 lut_b[256];
	/* Size in pixels of the COB memory allocated to this CRTC. */
	u32 cob_size;

	struct drm_pending_vblank_event *event;

	struct debugfs_regset32 regset;
};

static inline struct vc4_crtc *
to_vc4_crtc(struct drm_crtc *crtc)
{
	return (struct vc4_crtc *)crtc;
}

#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)

#define VC4_REG32(reg) { .name = #reg, .offset = reg }
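/* Note that these accessors rely on a local variable named "vc4" being in
 * scope at the call site. Illustrative use (a sketch, not driver code;
 * V3D_IDENT0 is a register name from vc4_regs.h):
 *
 *	struct vc4_dev *vc4 = to_vc4_dev(dev);
 *	u32 ident = V3D_READ(V3D_IDENT0);
 */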
struct vc4_exec_info {
	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Latest write_seqno of any BO that binning depends on. */
	uint64_t bin_dep_seqno;

	struct dma_fence *fence;

	/* Last current addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_cma_object **bo;
	uint32_t bo_count;

	/* List of BOs that are being written by the RCL. Other than
	 * the binner temporary storage, this is all the BOs written
	 * by the job.
	 */
	struct drm_gem_cma_object *rcl_write_bo[4];
	uint32_t rcl_write_bo_count;

	/* Pointers for our position in vc4->job_list */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_cma_object *exec_bo;

	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at. It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	/* Physical address of the start of the tile alloc array
	 * (where each tile's binned CL will start)
	 */
	uint32_t tile_alloc_offset;
	/* Bitmask of which binner slots are freed when this job completes. */
	uint32_t bin_slots;

	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;

	/* Pointers to the shader recs. The paddr gets incremented as CL
	 * packets are relocated in validate_gl_shader_state, and the vaddrs
	 * (u and v) get incremented and size decremented as the shader recs
	 * themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data. These pointers are incremented, and
	 * size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;

	/* Pointer to a performance monitor object if the user requested it,
	 * NULL otherwise.
	 */
	struct vc4_perfmon *perfmon;
};

/* Per-open file private data. Any driver-specific resource that has to be
 * released when the DRM file is closed should be placed here.
 */
struct vc4_file {
	struct {
		struct idr idr;
		struct mutex lock;
	} perfmon;
};

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->bin_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->render_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_last_entry(&vc4->render_job_list,
			       struct vc4_exec_info, head);
}
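/* The job-list helpers above walk lists that the IRQ handler also
 * manipulates, so callers are expected to hold job_lock. Illustrative
 * sketch (an assumption, not driver code):
 *
 *	unsigned long irqflags;
 *	struct vc4_exec_info *exec;
 *
 *	spin_lock_irqsave(&vc4->job_lock, irqflags);
 *	exec = vc4_first_bin_job(vc4);
 *	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 */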
/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;

	uint32_t num_uniform_addr_offsets;
	uint32_t *uniform_addr_offsets;

	bool is_threaded;
};

/**
 * _wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts. Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar
 * and we've never had a chance to check the condition before the timeout.
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			if (!(COND))					\
				ret__ = -ETIMEDOUT;			\
			break;						\
		}							\
		if (W && drm_can_sleep()) {				\
			msleep(W);					\
		} else {						\
			cpu_relax();					\
		}							\
	}								\
	ret__;								\
})

#define wait_for(COND, MS) _wait_for(COND, MS, 1)
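/* Illustrative use (hypothetical condition, not driver code): poll a
 * status register with a 100ms timeout, sleeping ~1ms between reads when
 * sleeping is allowed; returns 0 on success or -ETIMEDOUT:
 *
 *	int ret = wait_for(V3D_READ(V3D_PCS) == 0, 100);
 *
 *	if (ret)
 *		DRM_ERROR("timed out waiting for V3D to go idle\n");
 */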
/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache, enum vc4_kernel_bo_type type);
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
struct dma_buf *vc4_prime_export(struct drm_device *dev,
				 struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
vm_fault_t vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);
void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
			     bool in_vblank_irq, int *vpos, int *hpos,
			     ktime_t *stime, ktime_t *etime,
			     const struct drm_display_mode *mode);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_txp_armed(struct drm_crtc_state *state);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
			  unsigned int *right, unsigned int *left,
			  unsigned int *top, unsigned int *bottom);

/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void vc4_debugfs_add_file(struct drm_device *drm,
			  const char *filename,
			  int (*show)(struct seq_file*, void*),
			  void *data);
void vc4_debugfs_add_regset32(struct drm_device *drm,
			      const char *filename,
			      struct debugfs_regset32 *regset);
#else
static inline void vc4_debugfs_add_file(struct drm_device *drm,
					const char *filename,
					int (*show)(struct seq_file*, void*),
					void *data)
{
}

static inline void vc4_debugfs_add_regset32(struct drm_device *drm,
					    const char *filename,
					    struct debugfs_regset32 *regset)
{
}
#endif
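/* Illustrative sketch (hypothetical file name and callback, not driver
 * code): a component registers a debugfs dump once the DRM minor exists;
 * the empty stubs above make the same call safe without CONFIG_DEBUG_FS:
 *
 *	static int my_debugfs_show(struct seq_file *m, void *data)
 *	{
 *		seq_puts(m, "hello from vc4\n");
 *		return 0;
 *	}
 *	...
 *	vc4_debugfs_add_file(drm, "my_state", my_debugfs_show, NULL);
 */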
/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_gem.c */
void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;

/* vc4_txp.c */
extern struct platform_driver vc4_txp_driver;

/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
int vc4_irq_postinstall(struct drm_device *dev);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel);
void vc4_hvs_mask_underrun(struct drm_device *dev, int channel);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
extern const struct of_device_id vc4_v3d_dt_match[];
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
int vc4_v3d_pm_get(struct vc4_dev *vc4);
void vc4_v3d_pm_put(struct vc4_dev *vc4);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_cma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);

/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
void vc4_perfmon_put(struct vc4_perfmon *perfmon);
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
		      bool capture);
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
void vc4_perfmon_open_file(struct vc4_file *vc4file);
void vc4_perfmon_close_file(struct vc4_file *vc4file);
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);