/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Broadcom
 */

#include <linux/delay.h>
#include <linux/refcount.h>
#include <linux/uaccess.h>

#include <drm/drm_atomic.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>

#include "uapi/drm/vc4_drm.h"

struct drm_device;
struct drm_gem_object;

/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this.
 */
enum vc4_kernel_bo_type {
	/* Any kernel allocation (gem_create_object hook) before it
	 * gets another type set.
	 */
	VC4_BO_TYPE_KERNEL,
	VC4_BO_TYPE_V3D,
	VC4_BO_TYPE_V3D_SHADER,
	VC4_BO_TYPE_DUMB,
	VC4_BO_TYPE_BIN,
	VC4_BO_TYPE_RCL,
	VC4_BO_TYPE_BCL,
	VC4_BO_TYPE_KERNEL_CACHE,
	VC4_BO_TYPE_COUNT
};

/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct vc4_perfmon {
	/* Tracks the number of users of the perfmon; when this counter
	 * reaches zero the perfmon is destroyed.
	 */
	refcount_t refcnt;

	/* Number of counters activated in this perfmon instance
	 * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
	 */
	u8 ncounters;

	/* Events counted by the HW perf counters. */
	u8 events[DRM_VC4_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the HW
	 * perf counter values every time the perfmon is attached to a GPU job.
	 * This way, perfmon users don't have to retrieve the results after
	 * each job if they want to track events covering several submissions.
	 * Note that counter values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 */
	u64 counters[];
};
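
/* A minimal sketch (illustrative, not verbatim driver code) of the
 * refcounting contract implied by refcnt: anything that stores a perfmon
 * pointer holds a reference. vc4_perfmon_find() (declared at the bottom of
 * this header) returns with a reference already taken on behalf of the
 * caller, so attaching a perfmon to a job and tearing it down pairs up as:
 *
 *	exec->perfmon = vc4_perfmon_find(vc4file, args->perfmonid);
 *	...
 *	vc4_perfmon_put(exec->perfmon);
 *
 * The perfmon itself is freed once refcnt drops to zero.
 */
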
struct vc4_dev {
	struct drm_device *dev;

	struct vc4_hdmi *hdmi;
	struct vc4_hvs *hvs;
	struct vc4_v3d *v3d;
	struct vc4_dpi *dpi;
	struct vc4_dsi *dsi1;
	struct vc4_vec *vec;
	struct vc4_txp *txp;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache. Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;

	u32 num_labels;
	struct vc4_label {
		const char *name;
		u32 num_allocated;
		u32 size_allocated;
	} *bo_labels;

	/* Protects bo_cache and bo_labels. */
	struct mutex bo_lock;

	/* Purgeable BO pool. All BOs in this pool can have their memory
	 * reclaimed if the driver is unable to allocate new BOs. We also
	 * keep stats related to the purge mechanism here.
	 */
	struct {
		struct list_head list;
		unsigned int num;
		size_t size;
		unsigned int purged_num;
		size_t purged_size;
		struct mutex lock;
	} purgeable;

	uint64_t dma_fence_context;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;

	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner. The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering. The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;
	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;

	/* Used to track the active perfmon, if any. Access to this field is
	 * protected by job_lock.
	 */
	struct vc4_perfmon *active_perfmon;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations. This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;

	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;

	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used.
	 */
	uint32_t bin_alloc_used;

	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;

	/* Incremented when an underrun error happens after an atomic commit.
	 * This is particularly useful to detect when a specific modeset is too
	 * demanding in terms of memory or HVS bandwidth, which is hard to
	 * guess at atomic check time.
	 */
	atomic_t underrun;

	struct work_struct overflow_mem_work;

	int power_refcount;

	/* Set to true when the load tracker is active. */
	bool load_tracker_enabled;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct semaphore async_modeset;

	struct drm_modeset_lock ctm_state_lock;
	struct drm_private_obj ctm_manager;
	struct drm_private_obj load_tracker;

	/* List of vc4_debugfs_info_entry for adding to debugfs once
	 * the minor is available (after drm_dev_register()).
	 */
	struct list_head debugfs_list;

	/* Mutex for binner bo allocation. */
	struct mutex bin_bo_lock;
	/* Reference count for our binner bo. */
	struct kref bin_bo_kref;
};

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return (struct vc4_dev *)dev->dev_private;
}
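
/* A minimal sketch of the seqno model above (vc4_seqno_done() is a
 * hypothetical helper, not part of the driver): a job submitted with
 * sequence number S is complete once finished_seqno has caught up with it.
 * Readers must hold job_lock or otherwise synchronize against the IRQ
 * handler.
 *
 *	static bool vc4_seqno_done(struct vc4_dev *vc4, uint64_t seqno)
 *	{
 *		return vc4->finished_seqno >= seqno;
 *	}
 */
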
struct vc4_bo {
	struct drm_gem_cma_object base;

	/* seqno of the last job to render using this BO. */
	uint64_t seqno;

	/* seqno of the last job to use the RCL to write to this BO.
	 *
	 * Note that this doesn't include binner overflow memory
	 * writes.
	 */
	uint64_t write_seqno;

	bool t_format;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;

	/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
	 * for user-allocated labels.
	 */
	int label;

	/* Count the number of active users. This is needed to determine
	 * whether we can move the BO to the purgeable list or not (when the BO
	 * is used by the GPU or the display engine we can't purge it).
	 */
	refcount_t usecnt;

	/* Store purgeable/purged state here */
	u32 madv;
	struct mutex madv_lock;
};

static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return (struct vc4_bo *)bo;
}

struct vc4_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
	uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
	return (struct vc4_fence *)fence;
}

struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};

struct vc4_v3d {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	struct clk *clk;
	struct debugfs_regset32 regset;
};

struct vc4_hvs {
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;

	/* Memory manager for CRTCs to allocate space in the display
	 * list. Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;
	struct debugfs_regset32 regset;
};

struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return (struct vc4_plane *)plane;
}

enum vc4_scaling_mode {
	VC4_SCALING_NONE,
	VC4_SCALING_TPZ,
	VC4_SCALING_PPF,
};
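
/* Illustrative sketch only (the driver's real per-axis selection, with its
 * exact thresholds, lives in vc4_plane.c): a scaling mode is derived from
 * the clipped source and destination sizes, roughly:
 *
 *	static enum vc4_scaling_mode get_scaling_mode(u32 src, u32 dst)
 *	{
 *		if (dst == src)
 *			return VC4_SCALING_NONE;
 *		return dst > src ? VC4_SCALING_PPF : VC4_SCALING_TPZ;
 *	}
 *
 * PPF is the polyphase filter (upscaling and mild downscaling), TPZ the
 * trapezoidal scaler (heavier downscaling); the real cutoffs differ.
 */
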
struct vc4_plane_state {
	struct drm_plane_state base;
	/* System memory copy of the display list for this element, computed
	 * at atomic_check time.
	 */
	u32 *dlist;
	u32 dlist_size; /* Number of dwords allocated for the display list */
	u32 dlist_count; /* Number of used dwords in the display list. */

	/* Offset in the dlist to various words, for pageflip or
	 * cursor updates.
	 */
	u32 pos0_offset;
	u32 pos2_offset;
	u32 ptr0_offset;
	u32 lbm_offset;

	/* Offset where the plane's dlist was last stored in the
	 * hardware at vc4_crtc_atomic_flush() time.
	 */
	u32 __iomem *hw_dlist;

	/* Clipped coordinates of the plane on the display. */
	int crtc_x, crtc_y, crtc_w, crtc_h;
	/* Clipped area being scanned from in the FB. */
	u32 src_x, src_y;

	u32 src_w[2], src_h[2];

	/* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
	enum vc4_scaling_mode x_scaling[2], y_scaling[2];
	bool is_unity;
	bool is_yuv;

	/* Offset to start scanning out from the start of the plane's
	 * BO.
	 */
	u32 offsets[3];

	/* Our allocation in LBM for temporary storage during scaling. */
	struct drm_mm_node lbm;

	/* Set when the plane has per-pixel alpha content or does not cover
	 * the entire screen. This is a hint to the CRTC that it might need
	 * to enable background color fill.
	 */
	bool needs_bg_fill;

	/* Mark the dlist as initialized. Useful to avoid initializing it twice
	 * when async update is not possible.
	 */
	bool dlist_initialized;

	/* Load of this plane on the HVS block. The load is expressed in HVS
	 * cycles/sec.
	 */
	u64 hvs_load;

	/* Memory bandwidth needed for this plane. This is expressed in
	 * bytes/sec.
	 */
	u64 membus_load;
};

static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
	return (struct vc4_plane_state *)state;
}

enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}

struct vc4_crtc_data {
	/* Which channel of the HVS this pixelvalve sources from. */
	int hvs_channel;

	enum vc4_encoder_type encoder_types[4];
	const char *debugfs_name;
};

struct vc4_crtc {
	struct drm_crtc base;
	struct platform_device *pdev;
	const struct vc4_crtc_data *data;
	void __iomem *regs;

	/* Timestamp at start of vblank irq - unaffected by lock delays. */
	ktime_t t_vblank;

	/* Which HVS channel we're using for our CRTC. */
	int channel;

	u8 lut_r[256];
	u8 lut_g[256];
	u8 lut_b[256];
	/* Size in pixels of the COB memory allocated to this CRTC. */
	u32 cob_size;

	struct drm_pending_vblank_event *event;

	struct debugfs_regset32 regset;
};

static inline struct vc4_crtc *
to_vc4_crtc(struct drm_crtc *crtc)
{
	return (struct vc4_crtc *)crtc;
}

#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)

#define VC4_REG32(reg) { .name = #reg, .offset = reg }
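
/* Usage sketch for the accessors above: they expect a local variable named
 * vc4 (a struct vc4_dev *) to be in scope at the call site, e.g.:
 *
 *	struct vc4_dev *vc4 = to_vc4_dev(dev);
 *	u32 ident = V3D_READ(V3D_IDENT0);
 *
 * (V3D_IDENT0 stands in here for any register offset from vc4_regs.h.)
 * VC4_REG32() builds a struct debugfs_reg32 entry from such a #define, so a
 * register dump table can be written as:
 *
 *	static const struct debugfs_reg32 v3d_regs[] = {
 *		VC4_REG32(V3D_IDENT0),
 *	};
 */
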
struct vc4_exec_info {
	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Latest write_seqno of any BO that binning depends on. */
	uint64_t bin_dep_seqno;

	struct dma_fence *fence;

	/* Last current addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_cma_object **bo;
	uint32_t bo_count;

	/* List of BOs that are being written by the RCL. Other than
	 * the binner temporary storage, this is all the BOs written
	 * by the job.
	 */
	struct drm_gem_cma_object *rcl_write_bo[4];
	uint32_t rcl_write_bo_count;

	/* Pointers for our position in vc4->job_list */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_cma_object *exec_bo;

	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at. It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	/* Physical address of the start of the tile alloc array
	 * (where each tile's binned CL will start)
	 */
	uint32_t tile_alloc_offset;
	/* Bitmask of which binner slots are freed when this job completes. */
	uint32_t bin_slots;

	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;

	/* Pointers to the shader recs. The paddr gets incremented as CL
	 * packets are relocated in validate_gl_shader_state, and the vaddrs
	 * (u and v) get incremented and the size decremented as the shader
	 * recs themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data. These pointers are incremented, and
	 * the size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;

	/* Pointer to a performance monitor object if the user requested it,
	 * NULL otherwise.
	 */
	struct vc4_perfmon *perfmon;

	/* Whether the exec has taken a reference to the binner BO, which should
	 * happen with a VC4_PACKET_TILE_BINNING_MODE_CONFIG packet.
	 */
	bool bin_bo_used;
};
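
/* A minimal sketch (not the exact vc4_gem.c code; register names follow
 * vc4_regs.h) of how last_ct0ca/last_ct1ca feed the hangcheck timer: a hang
 * is only declared when the current addresses show no progress since the
 * previous tick.
 *
 *	uint32_t ct0ca = V3D_READ(V3D_CTNCA(0));
 *	if (ct0ca == exec->last_ct0ca)
 *		schedule_work(&vc4->hangcheck.reset_work);
 *	else
 *		exec->last_ct0ca = ct0ca;
 */
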
/* Per-open file private data. Any driver-specific resource that has to be
 * released when the DRM file is closed should be placed here.
 */
struct vc4_file {
	struct {
		struct idr idr;
		struct mutex lock;
	} perfmon;

	bool bin_bo_used;
};

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->bin_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->render_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_last_entry(&vc4->render_job_list,
			       struct vc4_exec_info, head);
}

/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;

	uint32_t num_uniform_addr_offsets;
	uint32_t *uniform_addr_offsets;

	bool is_threaded;
};
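
/* A hedged sketch of the uniform-size check described above (the real
 * validation lives in vc4_validate.c; the exact expression is illustrative):
 * when a shader state record references this shader, the submitted uniform
 * stream must cover all the uniform reads the shader can perform.
 *
 *	if (exec->uniforms_size < validated_shader->uniforms_src_size)
 *		return -EINVAL;
 */
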
/**
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since
 * the timeout could be due to preemption or similar and we might never have
 * had a chance to check the condition before the timeout expired.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
	int ret__; \
	might_sleep(); \
	for (;;) { \
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP; \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret__ = 0; \
			break; \
		} \
		if (expired__) { \
			ret__ = -ETIMEDOUT; \
			break; \
		} \
		usleep_range(wait__, wait__ * 2); \
		if (wait__ < (Wmax)) \
			wait__ <<= 1; \
	} \
	ret__; \
})

#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
						   (Wmax))
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
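
/* Usage sketch: wait_for() polls a condition with a timeout in milliseconds,
 * converting to microseconds internally and backing its sleep off from 10 us
 * up to 1 ms between checks. The register and bit below are made up for
 * illustration:
 *
 *	if (wait_for(V3D_READ(V3D_SOME_STATUS) & V3D_SOME_READY, 1))
 *		DRM_ERROR("timed out waiting for V3D\n");
 *
 * __wait_for()'s OP argument (left empty by the wait_for() wrapper) can be
 * used to kick the hardware once per poll iteration before COND is evaluated.
 */
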
/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache, enum vc4_kernel_bo_type type);
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
vm_fault_t vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);
void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_txp_armed(struct drm_crtc_state *state);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
			  unsigned int *right, unsigned int *left,
			  unsigned int *top, unsigned int *bottom);

/* vc4_debugfs.c */
void vc4_debugfs_init(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void vc4_debugfs_add_file(struct drm_device *drm,
			  const char *filename,
			  int (*show)(struct seq_file*, void*),
			  void *data);
void vc4_debugfs_add_regset32(struct drm_device *drm,
			      const char *filename,
			      struct debugfs_regset32 *regset);
#else
static inline void vc4_debugfs_add_file(struct drm_device *drm,
					const char *filename,
					int (*show)(struct seq_file*, void*),
					void *data)
{
}

static inline void vc4_debugfs_add_regset32(struct drm_device *drm,
					    const char *filename,
					    struct debugfs_regset32 *regset)
{
}
#endif

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_gem.c */
void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;

/* vc4_txp.c */
extern struct platform_driver vc4_txp_driver;

/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
int vc4_irq_postinstall(struct drm_device *dev);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel);
void vc4_hvs_mask_underrun(struct drm_device *dev, int channel);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
extern const struct of_device_id vc4_v3d_dt_match[];
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used);
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4);
int vc4_v3d_pm_get(struct vc4_dev *vc4);
void vc4_v3d_pm_put(struct vc4_dev *vc4);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool
vc4_check_tex_size(struct vc4_exec_info *exec,
		   struct drm_gem_cma_object *fbo,
		   uint32_t offset, uint8_t tiling_format,
		   uint32_t width, uint32_t height, uint8_t cpp);
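
/* For linear (raster) textures, the bounds check in vc4_check_tex_size()
 * amounts to roughly the following (illustrative; the real code also rounds
 * width and height up to tile boundaries for T-format and LT-format tiling,
 * and checks the offset's alignment):
 *
 *	uint32_t stride = width * cpp;
 *	if (offset + stride * height > fbo->base.size)
 *		return false;
 */
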
/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);

/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
void vc4_perfmon_put(struct vc4_perfmon *perfmon);
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
		      bool capture);
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
void vc4_perfmon_open_file(struct vc4_file *vc4file);
void vc4_perfmon_close_file(struct vc4_file *vc4file);
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);
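
/* Typical perfmon lifecycle as driven from userspace (sketch; see
 * vc4_perfmon.c and the ioctl definitions in uapi/drm/vc4_drm.h):
 *
 *	DRM_IOCTL_VC4_PERFMON_CREATE      allocates a perfmon, returns an id
 *	DRM_IOCTL_VC4_SUBMIT_CL           a job may reference that perfmon id
 *	DRM_IOCTL_VC4_PERFMON_GET_VALUES  reads the accumulated counters
 *	DRM_IOCTL_VC4_PERFMON_DESTROY     drops the file's reference
 */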