1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 /* 3 * Copyright (C) 2015 Broadcom 4 */ 5 #ifndef _VC4_DRV_H_ 6 #define _VC4_DRV_H_ 7 8 #include <linux/delay.h> 9 #include <linux/refcount.h> 10 #include <linux/uaccess.h> 11 12 #include <drm/drm_atomic.h> 13 #include <drm/drm_debugfs.h> 14 #include <drm/drm_device.h> 15 #include <drm/drm_encoder.h> 16 #include <drm/drm_gem_cma_helper.h> 17 #include <drm/drm_managed.h> 18 #include <drm/drm_mm.h> 19 #include <drm/drm_modeset_lock.h> 20 21 #include "uapi/drm/vc4_drm.h" 22 23 struct drm_device; 24 struct drm_gem_object; 25 26 /* Don't forget to update vc4_bo.c: bo_type_names[] when adding to 27 * this. 28 */ 29 enum vc4_kernel_bo_type { 30 /* Any kernel allocation (gem_create_object hook) before it 31 * gets another type set. 32 */ 33 VC4_BO_TYPE_KERNEL, 34 VC4_BO_TYPE_V3D, 35 VC4_BO_TYPE_V3D_SHADER, 36 VC4_BO_TYPE_DUMB, 37 VC4_BO_TYPE_BIN, 38 VC4_BO_TYPE_RCL, 39 VC4_BO_TYPE_BCL, 40 VC4_BO_TYPE_KERNEL_CACHE, 41 VC4_BO_TYPE_COUNT 42 }; 43 44 /* Performance monitor object. The perform lifetime is controlled by userspace 45 * using perfmon related ioctls. A perfmon can be attached to a submit_cl 46 * request, and when this is the case, HW perf counters will be activated just 47 * before the submit_cl is submitted to the GPU and disabled when the job is 48 * done. This way, only events related to a specific job will be counted. 49 */ 50 struct vc4_perfmon { 51 struct vc4_dev *dev; 52 53 /* Tracks the number of users of the perfmon, when this counter reaches 54 * zero the perfmon is destroyed. 55 */ 56 refcount_t refcnt; 57 58 /* Number of counters activated in this perfmon instance 59 * (should be less than DRM_VC4_MAX_PERF_COUNTERS). 60 */ 61 u8 ncounters; 62 63 /* Events counted by the HW perf counters. */ 64 u8 events[DRM_VC4_MAX_PERF_COUNTERS]; 65 66 /* Storage for counter values. Counters are incremented by the HW 67 * perf counter values every time the perfmon is attached to a GPU job. 68 * This way, perfmon users don't have to retrieve the results after 69 * each job if they want to track events covering several submissions. 70 * Note that counter values can't be reset, but you can fake a reset by 71 * destroying the perfmon and creating a new one. 72 */ 73 u64 counters[]; 74 }; 75 76 struct vc4_dev { 77 struct drm_device base; 78 79 bool is_vc5; 80 81 unsigned int irq; 82 83 struct vc4_hvs *hvs; 84 struct vc4_v3d *v3d; 85 struct vc4_dpi *dpi; 86 struct vc4_vec *vec; 87 struct vc4_txp *txp; 88 89 struct vc4_hang_state *hang_state; 90 91 /* The kernel-space BO cache. Tracks buffers that have been 92 * unreferenced by all other users (refcounts of 0!) but not 93 * yet freed, so we can do cheap allocations. 94 */ 95 struct vc4_bo_cache { 96 /* Array of list heads for entries in the BO cache, 97 * based on number of pages, so we can do O(1) lookups 98 * in the cache when allocating. 99 */ 100 struct list_head *size_list; 101 uint32_t size_list_size; 102 103 /* List of all BOs in the cache, ordered by age, so we 104 * can do O(1) lookups when trying to free old 105 * buffers. 106 */ 107 struct list_head time_list; 108 struct work_struct time_work; 109 struct timer_list time_timer; 110 } bo_cache; 111 112 u32 num_labels; 113 struct vc4_label { 114 const char *name; 115 u32 num_allocated; 116 u32 size_allocated; 117 } *bo_labels; 118 119 /* Protects bo_cache and bo_labels. */ 120 struct mutex bo_lock; 121 122 /* Purgeable BO pool. All BOs in this pool can have their memory 123 * reclaimed if the driver is unable to allocate new BOs. We also 124 * keep stats related to the purge mechanism here. 125 */ 126 struct { 127 struct list_head list; 128 unsigned int num; 129 size_t size; 130 unsigned int purged_num; 131 size_t purged_size; 132 struct mutex lock; 133 } purgeable; 134 135 uint64_t dma_fence_context; 136 137 /* Sequence number for the last job queued in bin_job_list. 138 * Starts at 0 (no jobs emitted). 139 */ 140 uint64_t emit_seqno; 141 142 /* Sequence number for the last completed job on the GPU. 143 * Starts at 0 (no jobs completed). 144 */ 145 uint64_t finished_seqno; 146 147 /* List of all struct vc4_exec_info for jobs to be executed in 148 * the binner. The first job in the list is the one currently 149 * programmed into ct0ca for execution. 150 */ 151 struct list_head bin_job_list; 152 153 /* List of all struct vc4_exec_info for jobs that have 154 * completed binning and are ready for rendering. The first 155 * job in the list is the one currently programmed into ct1ca 156 * for execution. 157 */ 158 struct list_head render_job_list; 159 160 /* List of the finished vc4_exec_infos waiting to be freed by 161 * job_done_work. 162 */ 163 struct list_head job_done_list; 164 /* Spinlock used to synchronize the job_list and seqno 165 * accesses between the IRQ handler and GEM ioctls. 166 */ 167 spinlock_t job_lock; 168 wait_queue_head_t job_wait_queue; 169 struct work_struct job_done_work; 170 171 /* Used to track the active perfmon if any. Access to this field is 172 * protected by job_lock. 173 */ 174 struct vc4_perfmon *active_perfmon; 175 176 /* List of struct vc4_seqno_cb for callbacks to be made from a 177 * workqueue when the given seqno is passed. 178 */ 179 struct list_head seqno_cb_list; 180 181 /* The memory used for storing binner tile alloc, tile state, 182 * and overflow memory allocations. This is freed when V3D 183 * powers down. 184 */ 185 struct vc4_bo *bin_bo; 186 187 /* Size of blocks allocated within bin_bo. */ 188 uint32_t bin_alloc_size; 189 190 /* Bitmask of the bin_alloc_size chunks in bin_bo that are 191 * used. 192 */ 193 uint32_t bin_alloc_used; 194 195 /* Bitmask of the current bin_alloc used for overflow memory. */ 196 uint32_t bin_alloc_overflow; 197 198 /* Incremented when an underrun error happened after an atomic commit. 199 * This is particularly useful to detect when a specific modeset is too 200 * demanding in term of memory or HVS bandwidth which is hard to guess 201 * at atomic check time. 202 */ 203 atomic_t underrun; 204 205 struct work_struct overflow_mem_work; 206 207 int power_refcount; 208 209 /* Set to true when the load tracker is active. */ 210 bool load_tracker_enabled; 211 212 /* Mutex controlling the power refcount. */ 213 struct mutex power_lock; 214 215 struct { 216 struct timer_list timer; 217 struct work_struct reset_work; 218 } hangcheck; 219 220 struct drm_modeset_lock ctm_state_lock; 221 struct drm_private_obj ctm_manager; 222 struct drm_private_obj hvs_channels; 223 struct drm_private_obj load_tracker; 224 225 /* List of vc4_debugfs_info_entry for adding to debugfs once 226 * the minor is available (after drm_dev_register()). 227 */ 228 struct list_head debugfs_list; 229 230 /* Mutex for binner bo allocation. */ 231 struct mutex bin_bo_lock; 232 /* Reference count for our binner bo. */ 233 struct kref bin_bo_kref; 234 }; 235 236 static inline struct vc4_dev * 237 to_vc4_dev(struct drm_device *dev) 238 { 239 return container_of(dev, struct vc4_dev, base); 240 } 241 242 struct vc4_bo { 243 struct drm_gem_cma_object base; 244 245 /* seqno of the last job to render using this BO. */ 246 uint64_t seqno; 247 248 /* seqno of the last job to use the RCL to write to this BO. 249 * 250 * Note that this doesn't include binner overflow memory 251 * writes. 252 */ 253 uint64_t write_seqno; 254 255 bool t_format; 256 257 /* List entry for the BO's position in either 258 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list 259 */ 260 struct list_head unref_head; 261 262 /* Time in jiffies when the BO was put in vc4->bo_cache. */ 263 unsigned long free_time; 264 265 /* List entry for the BO's position in vc4_dev->bo_cache.size_list */ 266 struct list_head size_head; 267 268 /* Struct for shader validation state, if created by 269 * DRM_IOCTL_VC4_CREATE_SHADER_BO. 270 */ 271 struct vc4_validated_shader_info *validated_shader; 272 273 /* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i 274 * for user-allocated labels. 275 */ 276 int label; 277 278 /* Count the number of active users. This is needed to determine 279 * whether we can move the BO to the purgeable list or not (when the BO 280 * is used by the GPU or the display engine we can't purge it). 281 */ 282 refcount_t usecnt; 283 284 /* Store purgeable/purged state here */ 285 u32 madv; 286 struct mutex madv_lock; 287 }; 288 289 static inline struct vc4_bo * 290 to_vc4_bo(struct drm_gem_object *bo) 291 { 292 return container_of(to_drm_gem_cma_obj(bo), struct vc4_bo, base); 293 } 294 295 struct vc4_fence { 296 struct dma_fence base; 297 struct drm_device *dev; 298 /* vc4 seqno for signaled() test */ 299 uint64_t seqno; 300 }; 301 302 static inline struct vc4_fence * 303 to_vc4_fence(struct dma_fence *fence) 304 { 305 return container_of(fence, struct vc4_fence, base); 306 } 307 308 struct vc4_seqno_cb { 309 struct work_struct work; 310 uint64_t seqno; 311 void (*func)(struct vc4_seqno_cb *cb); 312 }; 313 314 struct vc4_v3d { 315 struct vc4_dev *vc4; 316 struct platform_device *pdev; 317 void __iomem *regs; 318 struct clk *clk; 319 struct debugfs_regset32 regset; 320 }; 321 322 struct vc4_hvs { 323 struct vc4_dev *vc4; 324 struct platform_device *pdev; 325 void __iomem *regs; 326 u32 __iomem *dlist; 327 328 struct clk *core_clk; 329 330 /* Memory manager for CRTCs to allocate space in the display 331 * list. Units are dwords. 332 */ 333 struct drm_mm dlist_mm; 334 /* Memory manager for the LBM memory used by HVS scaling. */ 335 struct drm_mm lbm_mm; 336 spinlock_t mm_lock; 337 338 struct drm_mm_node mitchell_netravali_filter; 339 340 struct debugfs_regset32 regset; 341 }; 342 343 struct vc4_plane { 344 struct drm_plane base; 345 }; 346 347 static inline struct vc4_plane * 348 to_vc4_plane(struct drm_plane *plane) 349 { 350 return container_of(plane, struct vc4_plane, base); 351 } 352 353 enum vc4_scaling_mode { 354 VC4_SCALING_NONE, 355 VC4_SCALING_TPZ, 356 VC4_SCALING_PPF, 357 }; 358 359 struct vc4_plane_state { 360 struct drm_plane_state base; 361 /* System memory copy of the display list for this element, computed 362 * at atomic_check time. 363 */ 364 u32 *dlist; 365 u32 dlist_size; /* Number of dwords allocated for the display list */ 366 u32 dlist_count; /* Number of used dwords in the display list. */ 367 368 /* Offset in the dlist to various words, for pageflip or 369 * cursor updates. 370 */ 371 u32 pos0_offset; 372 u32 pos2_offset; 373 u32 ptr0_offset; 374 u32 lbm_offset; 375 376 /* Offset where the plane's dlist was last stored in the 377 * hardware at vc4_crtc_atomic_flush() time. 378 */ 379 u32 __iomem *hw_dlist; 380 381 /* Clipped coordinates of the plane on the display. */ 382 int crtc_x, crtc_y, crtc_w, crtc_h; 383 /* Clipped area being scanned from in the FB. */ 384 u32 src_x, src_y; 385 386 u32 src_w[2], src_h[2]; 387 388 /* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */ 389 enum vc4_scaling_mode x_scaling[2], y_scaling[2]; 390 bool is_unity; 391 bool is_yuv; 392 393 /* Offset to start scanning out from the start of the plane's 394 * BO. 395 */ 396 u32 offsets[3]; 397 398 /* Our allocation in LBM for temporary storage during scaling. */ 399 struct drm_mm_node lbm; 400 401 /* Set when the plane has per-pixel alpha content or does not cover 402 * the entire screen. This is a hint to the CRTC that it might need 403 * to enable background color fill. 404 */ 405 bool needs_bg_fill; 406 407 /* Mark the dlist as initialized. Useful to avoid initializing it twice 408 * when async update is not possible. 409 */ 410 bool dlist_initialized; 411 412 /* Load of this plane on the HVS block. The load is expressed in HVS 413 * cycles/sec. 414 */ 415 u64 hvs_load; 416 417 /* Memory bandwidth needed for this plane. This is expressed in 418 * bytes/sec. 419 */ 420 u64 membus_load; 421 }; 422 423 static inline struct vc4_plane_state * 424 to_vc4_plane_state(struct drm_plane_state *state) 425 { 426 return container_of(state, struct vc4_plane_state, base); 427 } 428 429 enum vc4_encoder_type { 430 VC4_ENCODER_TYPE_NONE, 431 VC4_ENCODER_TYPE_HDMI0, 432 VC4_ENCODER_TYPE_HDMI1, 433 VC4_ENCODER_TYPE_VEC, 434 VC4_ENCODER_TYPE_DSI0, 435 VC4_ENCODER_TYPE_DSI1, 436 VC4_ENCODER_TYPE_SMI, 437 VC4_ENCODER_TYPE_DPI, 438 }; 439 440 struct vc4_encoder { 441 struct drm_encoder base; 442 enum vc4_encoder_type type; 443 u32 clock_select; 444 445 void (*pre_crtc_configure)(struct drm_encoder *encoder, struct drm_atomic_state *state); 446 void (*pre_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state); 447 void (*post_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state); 448 449 void (*post_crtc_disable)(struct drm_encoder *encoder, struct drm_atomic_state *state); 450 void (*post_crtc_powerdown)(struct drm_encoder *encoder, struct drm_atomic_state *state); 451 }; 452 453 static inline struct vc4_encoder * 454 to_vc4_encoder(struct drm_encoder *encoder) 455 { 456 return container_of(encoder, struct vc4_encoder, base); 457 } 458 459 struct vc4_crtc_data { 460 /* Bitmask of channels (FIFOs) of the HVS that the output can source from */ 461 unsigned int hvs_available_channels; 462 463 /* Which output of the HVS this pixelvalve sources from. */ 464 int hvs_output; 465 }; 466 467 struct vc4_pv_data { 468 struct vc4_crtc_data base; 469 470 /* Depth of the PixelValve FIFO in bytes */ 471 unsigned int fifo_depth; 472 473 /* Number of pixels output per clock period */ 474 u8 pixels_per_clock; 475 476 enum vc4_encoder_type encoder_types[4]; 477 const char *debugfs_name; 478 479 }; 480 481 struct vc4_crtc { 482 struct drm_crtc base; 483 struct platform_device *pdev; 484 const struct vc4_crtc_data *data; 485 void __iomem *regs; 486 487 /* Timestamp at start of vblank irq - unaffected by lock delays. */ 488 ktime_t t_vblank; 489 490 u8 lut_r[256]; 491 u8 lut_g[256]; 492 u8 lut_b[256]; 493 494 struct drm_pending_vblank_event *event; 495 496 struct debugfs_regset32 regset; 497 498 /** 499 * @feeds_txp: True if the CRTC feeds our writeback controller. 500 */ 501 bool feeds_txp; 502 503 /** 504 * @irq_lock: Spinlock protecting the resources shared between 505 * the atomic code and our vblank handler. 506 */ 507 spinlock_t irq_lock; 508 509 /** 510 * @current_dlist: Start offset of the display list currently 511 * set in the HVS for that CRTC. Protected by @irq_lock, and 512 * copied in vc4_hvs_update_dlist() for the CRTC interrupt 513 * handler to have access to that value. 514 */ 515 unsigned int current_dlist; 516 517 /** 518 * @current_hvs_channel: HVS channel currently assigned to the 519 * CRTC. Protected by @irq_lock, and copied in 520 * vc4_hvs_atomic_begin() for the CRTC interrupt handler to have 521 * access to that value. 522 */ 523 unsigned int current_hvs_channel; 524 }; 525 526 static inline struct vc4_crtc * 527 to_vc4_crtc(struct drm_crtc *crtc) 528 { 529 return container_of(crtc, struct vc4_crtc, base); 530 } 531 532 static inline const struct vc4_crtc_data * 533 vc4_crtc_to_vc4_crtc_data(const struct vc4_crtc *crtc) 534 { 535 return crtc->data; 536 } 537 538 static inline const struct vc4_pv_data * 539 vc4_crtc_to_vc4_pv_data(const struct vc4_crtc *crtc) 540 { 541 const struct vc4_crtc_data *data = vc4_crtc_to_vc4_crtc_data(crtc); 542 543 return container_of(data, struct vc4_pv_data, base); 544 } 545 546 struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc, 547 struct drm_crtc_state *state); 548 549 struct vc4_crtc_state { 550 struct drm_crtc_state base; 551 /* Dlist area for this CRTC configuration. */ 552 struct drm_mm_node mm; 553 bool txp_armed; 554 unsigned int assigned_channel; 555 556 struct { 557 unsigned int left; 558 unsigned int right; 559 unsigned int top; 560 unsigned int bottom; 561 } margins; 562 563 unsigned long hvs_load; 564 565 /* Transitional state below, only valid during atomic commits */ 566 bool update_muxing; 567 }; 568 569 #define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1) 570 571 static inline struct vc4_crtc_state * 572 to_vc4_crtc_state(struct drm_crtc_state *crtc_state) 573 { 574 return container_of(crtc_state, struct vc4_crtc_state, base); 575 } 576 577 #define V3D_READ(offset) readl(vc4->v3d->regs + offset) 578 #define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset) 579 #define HVS_READ(offset) readl(hvs->regs + offset) 580 #define HVS_WRITE(offset, val) writel(val, hvs->regs + offset) 581 582 #define VC4_REG32(reg) { .name = #reg, .offset = reg } 583 584 struct vc4_exec_info { 585 struct vc4_dev *dev; 586 587 /* Sequence number for this bin/render job. */ 588 uint64_t seqno; 589 590 /* Latest write_seqno of any BO that binning depends on. */ 591 uint64_t bin_dep_seqno; 592 593 struct dma_fence *fence; 594 595 /* Last current addresses the hardware was processing when the 596 * hangcheck timer checked on us. 597 */ 598 uint32_t last_ct0ca, last_ct1ca; 599 600 /* Kernel-space copy of the ioctl arguments */ 601 struct drm_vc4_submit_cl *args; 602 603 /* This is the array of BOs that were looked up at the start of exec. 604 * Command validation will use indices into this array. 605 */ 606 struct drm_gem_cma_object **bo; 607 uint32_t bo_count; 608 609 /* List of BOs that are being written by the RCL. Other than 610 * the binner temporary storage, this is all the BOs written 611 * by the job. 612 */ 613 struct drm_gem_cma_object *rcl_write_bo[4]; 614 uint32_t rcl_write_bo_count; 615 616 /* Pointers for our position in vc4->job_list */ 617 struct list_head head; 618 619 /* List of other BOs used in the job that need to be released 620 * once the job is complete. 621 */ 622 struct list_head unref_list; 623 624 /* Current unvalidated indices into @bo loaded by the non-hardware 625 * VC4_PACKET_GEM_HANDLES. 626 */ 627 uint32_t bo_index[2]; 628 629 /* This is the BO where we store the validated command lists, shader 630 * records, and uniforms. 631 */ 632 struct drm_gem_cma_object *exec_bo; 633 634 /** 635 * This tracks the per-shader-record state (packet 64) that 636 * determines the length of the shader record and the offset 637 * it's expected to be found at. It gets read in from the 638 * command lists. 639 */ 640 struct vc4_shader_state { 641 uint32_t addr; 642 /* Maximum vertex index referenced by any primitive using this 643 * shader state. 644 */ 645 uint32_t max_index; 646 } *shader_state; 647 648 /** How many shader states the user declared they were using. */ 649 uint32_t shader_state_size; 650 /** How many shader state records the validator has seen. */ 651 uint32_t shader_state_count; 652 653 bool found_tile_binning_mode_config_packet; 654 bool found_start_tile_binning_packet; 655 bool found_increment_semaphore_packet; 656 bool found_flush; 657 uint8_t bin_tiles_x, bin_tiles_y; 658 /* Physical address of the start of the tile alloc array 659 * (where each tile's binned CL will start) 660 */ 661 uint32_t tile_alloc_offset; 662 /* Bitmask of which binner slots are freed when this job completes. */ 663 uint32_t bin_slots; 664 665 /** 666 * Computed addresses pointing into exec_bo where we start the 667 * bin thread (ct0) and render thread (ct1). 668 */ 669 uint32_t ct0ca, ct0ea; 670 uint32_t ct1ca, ct1ea; 671 672 /* Pointer to the unvalidated bin CL (if present). */ 673 void *bin_u; 674 675 /* Pointers to the shader recs. These paddr gets incremented as CL 676 * packets are relocated in validate_gl_shader_state, and the vaddrs 677 * (u and v) get incremented and size decremented as the shader recs 678 * themselves are validated. 679 */ 680 void *shader_rec_u; 681 void *shader_rec_v; 682 uint32_t shader_rec_p; 683 uint32_t shader_rec_size; 684 685 /* Pointers to the uniform data. These pointers are incremented, and 686 * size decremented, as each batch of uniforms is uploaded. 687 */ 688 void *uniforms_u; 689 void *uniforms_v; 690 uint32_t uniforms_p; 691 uint32_t uniforms_size; 692 693 /* Pointer to a performance monitor object if the user requested it, 694 * NULL otherwise. 695 */ 696 struct vc4_perfmon *perfmon; 697 698 /* Whether the exec has taken a reference to the binner BO, which should 699 * happen with a VC4_PACKET_TILE_BINNING_MODE_CONFIG packet. 700 */ 701 bool bin_bo_used; 702 }; 703 704 /* Per-open file private data. Any driver-specific resource that has to be 705 * released when the DRM file is closed should be placed here. 706 */ 707 struct vc4_file { 708 struct vc4_dev *dev; 709 710 struct { 711 struct idr idr; 712 struct mutex lock; 713 } perfmon; 714 715 bool bin_bo_used; 716 }; 717 718 static inline struct vc4_exec_info * 719 vc4_first_bin_job(struct vc4_dev *vc4) 720 { 721 return list_first_entry_or_null(&vc4->bin_job_list, 722 struct vc4_exec_info, head); 723 } 724 725 static inline struct vc4_exec_info * 726 vc4_first_render_job(struct vc4_dev *vc4) 727 { 728 return list_first_entry_or_null(&vc4->render_job_list, 729 struct vc4_exec_info, head); 730 } 731 732 static inline struct vc4_exec_info * 733 vc4_last_render_job(struct vc4_dev *vc4) 734 { 735 if (list_empty(&vc4->render_job_list)) 736 return NULL; 737 return list_last_entry(&vc4->render_job_list, 738 struct vc4_exec_info, head); 739 } 740 741 /** 742 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture 743 * setup parameters. 744 * 745 * This will be used at draw time to relocate the reference to the texture 746 * contents in p0, and validate that the offset combined with 747 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO. 748 * Note that the hardware treats unprovided config parameters as 0, so not all 749 * of them need to be set up for every texure sample, and we'll store ~0 as 750 * the offset to mark the unused ones. 751 * 752 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit 753 * Setup") for definitions of the texture parameters. 754 */ 755 struct vc4_texture_sample_info { 756 bool is_direct; 757 uint32_t p_offset[4]; 758 }; 759 760 /** 761 * struct vc4_validated_shader_info - information about validated shaders that 762 * needs to be used from command list validation. 763 * 764 * For a given shader, each time a shader state record references it, we need 765 * to verify that the shader doesn't read more uniforms than the shader state 766 * record's uniform BO pointer can provide, and we need to apply relocations 767 * and validate the shader state record's uniforms that define the texture 768 * samples. 769 */ 770 struct vc4_validated_shader_info { 771 uint32_t uniforms_size; 772 uint32_t uniforms_src_size; 773 uint32_t num_texture_samples; 774 struct vc4_texture_sample_info *texture_samples; 775 776 uint32_t num_uniform_addr_offsets; 777 uint32_t *uniform_addr_offsets; 778 779 bool is_threaded; 780 }; 781 782 /** 783 * __wait_for - magic wait macro 784 * 785 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's 786 * important that we check the condition again after having timed out, since the 787 * timeout could be due to preemption or similar and we've never had a chance to 788 * check the condition before the timeout. 789 */ 790 #define __wait_for(OP, COND, US, Wmin, Wmax) ({ \ 791 const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \ 792 long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \ 793 int ret__; \ 794 might_sleep(); \ 795 for (;;) { \ 796 const bool expired__ = ktime_after(ktime_get_raw(), end__); \ 797 OP; \ 798 /* Guarantee COND check prior to timeout */ \ 799 barrier(); \ 800 if (COND) { \ 801 ret__ = 0; \ 802 break; \ 803 } \ 804 if (expired__) { \ 805 ret__ = -ETIMEDOUT; \ 806 break; \ 807 } \ 808 usleep_range(wait__, wait__ * 2); \ 809 if (wait__ < (Wmax)) \ 810 wait__ <<= 1; \ 811 } \ 812 ret__; \ 813 }) 814 815 #define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \ 816 (Wmax)) 817 #define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000) 818 819 /* vc4_bo.c */ 820 struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size); 821 struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size, 822 bool from_cache, enum vc4_kernel_bo_type type); 823 int vc4_bo_dumb_create(struct drm_file *file_priv, 824 struct drm_device *dev, 825 struct drm_mode_create_dumb *args); 826 int vc4_create_bo_ioctl(struct drm_device *dev, void *data, 827 struct drm_file *file_priv); 828 int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, 829 struct drm_file *file_priv); 830 int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, 831 struct drm_file *file_priv); 832 int vc4_set_tiling_ioctl(struct drm_device *dev, void *data, 833 struct drm_file *file_priv); 834 int vc4_get_tiling_ioctl(struct drm_device *dev, void *data, 835 struct drm_file *file_priv); 836 int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data, 837 struct drm_file *file_priv); 838 int vc4_label_bo_ioctl(struct drm_device *dev, void *data, 839 struct drm_file *file_priv); 840 int vc4_bo_cache_init(struct drm_device *dev); 841 int vc4_bo_inc_usecnt(struct vc4_bo *bo); 842 void vc4_bo_dec_usecnt(struct vc4_bo *bo); 843 void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo); 844 void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo); 845 846 /* vc4_crtc.c */ 847 extern struct platform_driver vc4_crtc_driver; 848 int vc4_crtc_disable_at_boot(struct drm_crtc *crtc); 849 int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc, 850 const struct drm_crtc_funcs *crtc_funcs, 851 const struct drm_crtc_helper_funcs *crtc_helper_funcs); 852 void vc4_crtc_destroy(struct drm_crtc *crtc); 853 int vc4_page_flip(struct drm_crtc *crtc, 854 struct drm_framebuffer *fb, 855 struct drm_pending_vblank_event *event, 856 uint32_t flags, 857 struct drm_modeset_acquire_ctx *ctx); 858 struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc); 859 void vc4_crtc_destroy_state(struct drm_crtc *crtc, 860 struct drm_crtc_state *state); 861 void vc4_crtc_reset(struct drm_crtc *crtc); 862 void vc4_crtc_handle_vblank(struct vc4_crtc *crtc); 863 void vc4_crtc_get_margins(struct drm_crtc_state *state, 864 unsigned int *left, unsigned int *right, 865 unsigned int *top, unsigned int *bottom); 866 867 /* vc4_debugfs.c */ 868 void vc4_debugfs_init(struct drm_minor *minor); 869 #ifdef CONFIG_DEBUG_FS 870 void vc4_debugfs_add_file(struct drm_device *drm, 871 const char *filename, 872 int (*show)(struct seq_file*, void*), 873 void *data); 874 void vc4_debugfs_add_regset32(struct drm_device *drm, 875 const char *filename, 876 struct debugfs_regset32 *regset); 877 #else 878 static inline void vc4_debugfs_add_file(struct drm_device *drm, 879 const char *filename, 880 int (*show)(struct seq_file*, void*), 881 void *data) 882 { 883 } 884 885 static inline void vc4_debugfs_add_regset32(struct drm_device *drm, 886 const char *filename, 887 struct debugfs_regset32 *regset) 888 { 889 } 890 #endif 891 892 /* vc4_drv.c */ 893 void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index); 894 int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args); 895 896 /* vc4_dpi.c */ 897 extern struct platform_driver vc4_dpi_driver; 898 899 /* vc4_dsi.c */ 900 extern struct platform_driver vc4_dsi_driver; 901 902 /* vc4_fence.c */ 903 extern const struct dma_fence_ops vc4_fence_ops; 904 905 /* vc4_gem.c */ 906 int vc4_gem_init(struct drm_device *dev); 907 int vc4_submit_cl_ioctl(struct drm_device *dev, void *data, 908 struct drm_file *file_priv); 909 int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data, 910 struct drm_file *file_priv); 911 int vc4_wait_bo_ioctl(struct drm_device *dev, void *data, 912 struct drm_file *file_priv); 913 void vc4_submit_next_bin_job(struct drm_device *dev); 914 void vc4_submit_next_render_job(struct drm_device *dev); 915 void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec); 916 int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, 917 uint64_t timeout_ns, bool interruptible); 918 void vc4_job_handle_completed(struct vc4_dev *vc4); 919 int vc4_queue_seqno_cb(struct drm_device *dev, 920 struct vc4_seqno_cb *cb, uint64_t seqno, 921 void (*func)(struct vc4_seqno_cb *cb)); 922 int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data, 923 struct drm_file *file_priv); 924 925 /* vc4_hdmi.c */ 926 extern struct platform_driver vc4_hdmi_driver; 927 928 /* vc4_vec.c */ 929 extern struct platform_driver vc4_vec_driver; 930 931 /* vc4_txp.c */ 932 extern struct platform_driver vc4_txp_driver; 933 934 /* vc4_irq.c */ 935 void vc4_irq_enable(struct drm_device *dev); 936 void vc4_irq_disable(struct drm_device *dev); 937 int vc4_irq_install(struct drm_device *dev, int irq); 938 void vc4_irq_uninstall(struct drm_device *dev); 939 void vc4_irq_reset(struct drm_device *dev); 940 941 /* vc4_hvs.c */ 942 extern struct platform_driver vc4_hvs_driver; 943 void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int output); 944 int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output); 945 u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo); 946 int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state); 947 void vc4_hvs_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state); 948 void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state); 949 void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state); 950 void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state); 951 void vc4_hvs_dump_state(struct vc4_hvs *hvs); 952 void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel); 953 void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel); 954 955 /* vc4_kms.c */ 956 int vc4_kms_load(struct drm_device *dev); 957 958 /* vc4_plane.c */ 959 struct drm_plane *vc4_plane_init(struct drm_device *dev, 960 enum drm_plane_type type); 961 int vc4_plane_create_additional_planes(struct drm_device *dev); 962 u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist); 963 u32 vc4_plane_dlist_size(const struct drm_plane_state *state); 964 void vc4_plane_async_set_fb(struct drm_plane *plane, 965 struct drm_framebuffer *fb); 966 967 /* vc4_v3d.c */ 968 extern struct platform_driver vc4_v3d_driver; 969 extern const struct of_device_id vc4_v3d_dt_match[]; 970 int vc4_v3d_get_bin_slot(struct vc4_dev *vc4); 971 int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used); 972 void vc4_v3d_bin_bo_put(struct vc4_dev *vc4); 973 int vc4_v3d_pm_get(struct vc4_dev *vc4); 974 void vc4_v3d_pm_put(struct vc4_dev *vc4); 975 976 /* vc4_validate.c */ 977 int 978 vc4_validate_bin_cl(struct drm_device *dev, 979 void *validated, 980 void *unvalidated, 981 struct vc4_exec_info *exec); 982 983 int 984 vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec); 985 986 struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec, 987 uint32_t hindex); 988 989 int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec); 990 991 bool vc4_check_tex_size(struct vc4_exec_info *exec, 992 struct drm_gem_cma_object *fbo, 993 uint32_t offset, uint8_t tiling_format, 994 uint32_t width, uint32_t height, uint8_t cpp); 995 996 /* vc4_validate_shader.c */ 997 struct vc4_validated_shader_info * 998 vc4_validate_shader(struct drm_gem_cma_object *shader_obj); 999 1000 /* vc4_perfmon.c */ 1001 void vc4_perfmon_get(struct vc4_perfmon *perfmon); 1002 void vc4_perfmon_put(struct vc4_perfmon *perfmon); 1003 void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon); 1004 void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon, 1005 bool capture); 1006 struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id); 1007 void vc4_perfmon_open_file(struct vc4_file *vc4file); 1008 void vc4_perfmon_close_file(struct vc4_file *vc4file); 1009 int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data, 1010 struct drm_file *file_priv); 1011 int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data, 1012 struct drm_file *file_priv); 1013 int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data, 1014 struct drm_file *file_priv); 1015 1016 #endif /* _VC4_DRV_H_ */ 1017