/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Broadcom
 */
#ifndef _VC4_DRV_H_
#define _VC4_DRV_H_

#include <linux/delay.h>
#include <linux/of.h>
#include <linux/refcount.h>
#include <linux/uaccess.h>

#include <drm/drm_atomic.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>

#include "uapi/drm/vc4_drm.h"

struct drm_device;
struct drm_gem_object;

/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this.
 */
enum vc4_kernel_bo_type {
	/* Any kernel allocation (gem_create_object hook) before it
	 * gets another type set.
	 */
	VC4_BO_TYPE_KERNEL,
	VC4_BO_TYPE_V3D,
	VC4_BO_TYPE_V3D_SHADER,
	VC4_BO_TYPE_DUMB,
	VC4_BO_TYPE_BIN,
	VC4_BO_TYPE_RCL,
	VC4_BO_TYPE_BCL,
	VC4_BO_TYPE_KERNEL_CACHE,
	VC4_BO_TYPE_COUNT
};

/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct vc4_perfmon {
	struct vc4_dev *dev;

	/* Tracks the number of users of the perfmon; when this counter
	 * reaches zero the perfmon is destroyed.
	 */
	refcount_t refcnt;

	/* Number of counters activated in this perfmon instance
	 * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
	 */
	u8 ncounters;

	/* Events counted by the HW perf counters. */
	u8 events[DRM_VC4_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the HW
	 * perf counter values every time the perfmon is attached to a GPU job.
	 * This way, perfmon users don't have to retrieve the results after
	 * each job if they want to track events covering several submissions.
	 * Note that counter values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 */
	u64 counters[];
};

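/* A minimal sketch (an assumption, not lifted from the driver sources) of
 * the refcounting flow this struct implies, using the vc4_perfmon_* helpers
 * declared at the bottom of this header:
 *
 *	perfmon = vc4_perfmon_find(vc4file, args->perfmonid); // takes a ref
 *	exec->perfmon = perfmon;
 *	...
 *	vc4_perfmon_put(exec->perfmon); // job done: drop the ref; the
 *					// last put frees the perfmon
 */
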
struct vc4_dev {
	struct drm_device base;
	struct device *dev;

	bool is_vc5;

	unsigned int irq;

	struct vc4_hvs *hvs;
	struct vc4_v3d *v3d;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache.  Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;

	u32 num_labels;
	struct vc4_label {
		const char *name;
		u32 num_allocated;
		u32 size_allocated;
	} *bo_labels;

	/* Protects bo_cache and bo_labels. */
	struct mutex bo_lock;

	/* Purgeable BO pool. All BOs in this pool can have their memory
	 * reclaimed if the driver is unable to allocate new BOs. We also
	 * keep stats related to the purge mechanism here.
	 */
	struct {
		struct list_head list;
		unsigned int num;
		size_t size;
		unsigned int purged_num;
		size_t purged_size;
		struct mutex lock;
	} purgeable;

	uint64_t dma_fence_context;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;

	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner.  The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering.  The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;
	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;

	/* Used to track the active perfmon if any. Access to this field is
	 * protected by job_lock.
	 */
	struct vc4_perfmon *active_perfmon;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations.  This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;

	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;

	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used.
	 */
	uint32_t bin_alloc_used;

	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;

	/* Incremented when an underrun error happened after an atomic commit.
	 * This is particularly useful to detect when a specific modeset is too
	 * demanding in terms of memory or HVS bandwidth, which is hard to
	 * guess at atomic check time.
	 */
	atomic_t underrun;

	struct work_struct overflow_mem_work;

	int power_refcount;

	/* Set to true when the load tracker is active. */
	bool load_tracker_enabled;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct drm_modeset_lock ctm_state_lock;
	struct drm_private_obj ctm_manager;
	struct drm_private_obj hvs_channels;
	struct drm_private_obj load_tracker;

	/* List of vc4_debugfs_info_entry for adding to debugfs once
	 * the minor is available (after drm_dev_register()).
	 */
	struct list_head debugfs_list;

	/* Mutex for binner bo allocation. */
	struct mutex bin_bo_lock;
	/* Reference count for our binner bo. */
	struct kref bin_bo_kref;
};

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return container_of(dev, struct vc4_dev, base);
}

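/* Sketch of the seqno convention implied by emit_seqno/finished_seqno above
 * (hypothetical caller; the required job_lock handling is elided): a job is
 * complete once the device's finished_seqno has caught up with the job's
 * own seqno:
 *
 *	struct vc4_dev *vc4 = to_vc4_dev(dev);
 *	bool done = vc4->finished_seqno >= exec->seqno;
 */
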
struct vc4_bo {
	struct drm_gem_dma_object base;

	/* seqno of the last job to render using this BO. */
	uint64_t seqno;

	/* seqno of the last job to use the RCL to write to this BO.
	 *
	 * Note that this doesn't include binner overflow memory
	 * writes.
	 */
	uint64_t write_seqno;

	bool t_format;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;

	/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
	 * for user-allocated labels.
	 */
	int label;

	/* Count the number of active users. This is needed to determine
	 * whether we can move the BO to the purgeable list or not (when the BO
	 * is used by the GPU or the display engine we can't purge it).
	 */
	refcount_t usecnt;

	/* Store purgeable/purged state here */
	u32 madv;
	struct mutex madv_lock;
};

static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return container_of(to_drm_gem_dma_obj(bo), struct vc4_bo, base);
}

struct vc4_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
	uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
	return container_of(fence, struct vc4_fence, base);
}

struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};

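/* Usage sketch (hypothetical caller) for the seqno callback mechanism; the
 * queueing function vc4_queue_seqno_cb() is declared near the bottom of this
 * header, and func runs from workqueue context once the seqno is passed:
 *
 *	static void my_done(struct vc4_seqno_cb *cb)
 *	{
 *		... recover the containing object via container_of(cb, ...) ...
 *	}
 *
 *	vc4_queue_seqno_cb(dev, &obj->cb, exec->seqno, my_done);
 */
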
struct vc4_v3d {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	struct clk *clk;
	struct debugfs_regset32 regset;
};

struct vc4_hvs {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;

	struct clk *core_clk;

	/* Memory manager for CRTCs to allocate space in the display
	 * list.  Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;

	struct debugfs_regset32 regset;
};

struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return container_of(plane, struct vc4_plane, base);
}

enum vc4_scaling_mode {
	VC4_SCALING_NONE,
	VC4_SCALING_TPZ,
	VC4_SCALING_PPF,
};

struct vc4_plane_state {
	struct drm_plane_state base;
	/* System memory copy of the display list for this element, computed
	 * at atomic_check time.
	 */
	u32 *dlist;
	u32 dlist_size; /* Number of dwords allocated for the display list */
	u32 dlist_count; /* Number of used dwords in the display list. */

	/* Offset in the dlist to various words, for pageflip or
	 * cursor updates.
	 */
	u32 pos0_offset;
	u32 pos2_offset;
	u32 ptr0_offset;
	u32 lbm_offset;

	/* Offset where the plane's dlist was last stored in the
	 * hardware at vc4_crtc_atomic_flush() time.
	 */
	u32 __iomem *hw_dlist;

	/* Clipped coordinates of the plane on the display. */
	int crtc_x, crtc_y, crtc_w, crtc_h;
	/* Clipped area being scanned from in the FB. */
	u32 src_x, src_y;

	u32 src_w[2], src_h[2];

	/* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
	enum vc4_scaling_mode x_scaling[2], y_scaling[2];
	bool is_unity;
	bool is_yuv;

	/* Offset to start scanning out from the start of the plane's
	 * BO.
	 */
	u32 offsets[3];

	/* Our allocation in LBM for temporary storage during scaling. */
	struct drm_mm_node lbm;

	/* Set when the plane has per-pixel alpha content or does not cover
	 * the entire screen. This is a hint to the CRTC that it might need
	 * to enable background color fill.
	 */
	bool needs_bg_fill;

	/* Mark the dlist as initialized. Useful to avoid initializing it twice
	 * when async update is not possible.
	 */
	bool dlist_initialized;

	/* Load of this plane on the HVS block. The load is expressed in HVS
	 * cycles/sec.
	 */
	u64 hvs_load;

	/* Memory bandwidth needed for this plane. This is expressed in
	 * bytes/sec.
	 */
	u64 membus_load;
};

static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
	return container_of(state, struct vc4_plane_state, base);
}

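/* Sketch (an assumption, mirroring what an async plane update would do with
 * the offsets stored in vc4_plane_state): patch a single dlist word both in
 * the kernel copy and in the live HVS copy written at flush time:
 *
 *	vc4_state->dlist[vc4_state->pos0_offset] = new_pos0;
 *	writel(new_pos0, &vc4_state->hw_dlist[vc4_state->pos0_offset]);
 */
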
enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI0,
	VC4_ENCODER_TYPE_HDMI1,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;

	void (*pre_crtc_configure)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*pre_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*post_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state);

	void (*post_crtc_disable)(struct drm_encoder *encoder, struct drm_atomic_state *state);
	void (*post_crtc_powerdown)(struct drm_encoder *encoder, struct drm_atomic_state *state);
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}

struct vc4_crtc_data {
	const char *debugfs_name;

	/* Bitmask of channels (FIFOs) of the HVS that the output can source from */
	unsigned int hvs_available_channels;

	/* Which output of the HVS this pixelvalve sources from. */
	int hvs_output;
};

struct vc4_pv_data {
	struct vc4_crtc_data base;

	/* Depth of the PixelValve FIFO in bytes */
	unsigned int fifo_depth;

	/* Number of pixels output per clock period */
	u8 pixels_per_clock;

	enum vc4_encoder_type encoder_types[4];
};

struct vc4_crtc {
	struct drm_crtc base;
	struct platform_device *pdev;
	const struct vc4_crtc_data *data;
	void __iomem *regs;

	/* Timestamp at start of vblank irq - unaffected by lock delays. */
	ktime_t t_vblank;

	u8 lut_r[256];
	u8 lut_g[256];
	u8 lut_b[256];

	struct drm_pending_vblank_event *event;

	struct debugfs_regset32 regset;

	/**
	 * @feeds_txp: True if the CRTC feeds our writeback controller.
	 */
	bool feeds_txp;

	/**
	 * @irq_lock: Spinlock protecting the resources shared between
	 * the atomic code and our vblank handler.
	 */
	spinlock_t irq_lock;

	/**
	 * @current_dlist: Start offset of the display list currently
	 * set in the HVS for that CRTC. Protected by @irq_lock, and
	 * copied in vc4_hvs_update_dlist() for the CRTC interrupt
	 * handler to have access to that value.
	 */
	unsigned int current_dlist;

	/**
	 * @current_hvs_channel: HVS channel currently assigned to the
	 * CRTC. Protected by @irq_lock, and copied in
	 * vc4_hvs_atomic_begin() for the CRTC interrupt handler to have
	 * access to that value.
	 */
	unsigned int current_hvs_channel;
};

static inline struct vc4_crtc *
to_vc4_crtc(struct drm_crtc *crtc)
{
	return container_of(crtc, struct vc4_crtc, base);
}

static inline const struct vc4_crtc_data *
vc4_crtc_to_vc4_crtc_data(const struct vc4_crtc *crtc)
{
	return crtc->data;
}

static inline const struct vc4_pv_data *
vc4_crtc_to_vc4_pv_data(const struct vc4_crtc *crtc)
{
	const struct vc4_crtc_data *data = vc4_crtc_to_vc4_crtc_data(crtc);

	return container_of(data, struct vc4_pv_data, base);
}

struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
					 struct drm_crtc_state *state);

struct vc4_crtc_state {
	struct drm_crtc_state base;
	/* Dlist area for this CRTC configuration. */
	struct drm_mm_node mm;
	bool txp_armed;
	unsigned int assigned_channel;

	struct {
		unsigned int left;
		unsigned int right;
		unsigned int top;
		unsigned int bottom;
	} margins;

	unsigned long hvs_load;

	/* Transitional state below, only valid during atomic commits */
	bool update_muxing;
};

#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)

static inline struct vc4_crtc_state *
to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
{
	return container_of(crtc_state, struct vc4_crtc_state, base);
}

#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, hvs->regs + offset)

#define VC4_REG32(reg) { .name = #reg, .offset = reg }

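/* VC4_REG32() expands to a struct debugfs_reg32 initializer, so a register
 * list for one of the regset fields above can be written as (hypothetical
 * list; SCALER_DISPCTRL is defined in vc4_regs.h):
 *
 *	static const struct debugfs_reg32 hvs_regs[] = {
 *		VC4_REG32(SCALER_DISPCTRL),
 *		...
 *	};
 */
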
struct vc4_exec_info {
	struct vc4_dev *dev;

	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Latest write_seqno of any BO that binning depends on. */
	uint64_t bin_dep_seqno;

	struct dma_fence *fence;

	/* Last current addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_dma_object **bo;
	uint32_t bo_count;

	/* List of BOs that are being written by the RCL.  Other than
	 * the binner temporary storage, this is all the BOs written
	 * by the job.
	 */
	struct drm_gem_dma_object *rcl_write_bo[4];
	uint32_t rcl_write_bo_count;

	/* Pointers for our position in vc4->job_list */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_dma_object *exec_bo;

	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at.  It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	/* Physical address of the start of the tile alloc array
	 * (where each tile's binned CL will start)
	 */
	uint32_t tile_alloc_offset;
	/* Bitmask of which binner slots are freed when this job completes. */
	uint32_t bin_slots;

	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;

	/* Pointers to the shader recs.  The paddr gets incremented as CL
	 * packets are relocated in validate_gl_shader_state, and the vaddrs
	 * (u and v) get incremented and size decremented as the shader recs
	 * themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data.  These pointers are incremented, and
	 * size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;

	/* Pointer to a performance monitor object if the user requested it,
	 * NULL otherwise.
	 */
	struct vc4_perfmon *perfmon;

	/* Whether the exec has taken a reference to the binner BO, which should
	 * happen with a VC4_PACKET_TILE_BINNING_MODE_CONFIG packet.
	 */
	bool bin_bo_used;
};

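/* Life cycle of a vc4_exec_info, summarizing the job lists documented in
 * struct vc4_dev above:
 *
 *	submit_cl ioctl -> bin_job_list -> (binning complete)
 *	                -> render_job_list -> (render complete)
 *	                -> job_done_list -> freed from job_done_work
 */
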
/* Per-open file private data. Any driver-specific resource that has to be
 * released when the DRM file is closed should be placed here.
 */
struct vc4_file {
	struct vc4_dev *dev;

	struct {
		struct idr idr;
		struct mutex lock;
	} perfmon;

	bool bin_bo_used;
};

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->bin_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->render_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_last_entry(&vc4->render_job_list,
			       struct vc4_exec_info, head);
}

/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;

	uint32_t num_uniform_addr_offsets;
	uint32_t *uniform_addr_offsets;

	bool is_threaded;
};

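/* Sketch (an assumption about the validator's consumer side) of how the
 * texture samples recorded above would be walked at command-list validation
 * time, skipping parameter slots marked ~0 as described in the kerneldoc:
 *
 *	for (i = 0; i < validated->num_texture_samples; i++) {
 *		struct vc4_texture_sample_info *s =
 *			&validated->texture_samples[i];
 *
 *		for (j = 0; j < 4; j++) {
 *			if (s->p_offset[j] == ~0)
 *				continue;
 *			... relocate/validate the uniform at p_offset[j] ...
 *		}
 *	}
 */
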
/**
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since the
 * timeout could be due to preemption or similar and we've never had a chance to
 * check the condition before the timeout.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
	int ret__; \
	might_sleep(); \
	for (;;) { \
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP; \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret__ = 0; \
			break; \
		} \
		if (expired__) { \
			ret__ = -ETIMEDOUT; \
			break; \
		} \
		usleep_range(wait__, wait__ * 2); \
		if (wait__ < (Wmax)) \
			wait__ <<= 1; \
	} \
	ret__; \
})

#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
						   (Wmax))
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)

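/* Example use (hypothetical condition; V3D_PCS and its BMACTIVE bit come
 * from vc4_regs.h, and V3D_READ() expects a local vc4 pointer in scope):
 * poll a register for up to 1 ms from sleeping context, backing off between
 * reads:
 *
 *	int ret = wait_for(!(V3D_READ(V3D_PCS) & V3D_PCS_BMACTIVE), 1);
 *	if (ret == -ETIMEDOUT)
 *		... binner still active after 1 ms ...
 */
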
/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache, enum vc4_kernel_bo_type type);
int vc4_bo_dumb_create(struct drm_file *file_priv,
		       struct drm_device *dev,
		       struct drm_mode_create_dumb *args);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int vc4_bo_cache_init(struct drm_device *dev);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
int vc4_bo_debugfs_init(struct drm_minor *minor);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_disable_at_boot(struct drm_crtc *crtc);
int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
		  const struct drm_crtc_funcs *crtc_funcs,
		  const struct drm_crtc_helper_funcs *crtc_helper_funcs);
int vc4_page_flip(struct drm_crtc *crtc,
		  struct drm_framebuffer *fb,
		  struct drm_pending_vblank_event *event,
		  uint32_t flags,
		  struct drm_modeset_acquire_ctx *ctx);
struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc);
void vc4_crtc_destroy_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *state);
void vc4_crtc_reset(struct drm_crtc *crtc);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_send_vblank(struct drm_crtc *crtc);
int vc4_crtc_late_register(struct drm_crtc *crtc);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
			  unsigned int *left, unsigned int *right,
			  unsigned int *top, unsigned int *bottom);

/* vc4_debugfs.c */
void vc4_debugfs_init(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
int vc4_debugfs_add_file(struct drm_minor *minor,
			 const char *filename,
			 int (*show)(struct seq_file*, void*),
			 void *data);
int vc4_debugfs_add_regset32(struct drm_minor *minor,
			     const char *filename,
			     struct debugfs_regset32 *regset);
#else
static inline int vc4_debugfs_add_file(struct drm_minor *minor,
				       const char *filename,
				       int (*show)(struct seq_file*, void*),
				       void *data)
{
	return 0;
}

static inline int vc4_debugfs_add_regset32(struct drm_minor *minor,
					   const char *filename,
					   struct debugfs_regset32 *regset)
{
	return 0;
}
#endif

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_gem.c */
int vc4_gem_init(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;

/* vc4_txp.c */
extern struct platform_driver vc4_txp_driver;

/* vc4_irq.c */
void vc4_irq_enable(struct drm_device *dev);
void vc4_irq_disable(struct drm_device *dev);
int vc4_irq_install(struct drm_device *dev, int irq);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int output);
int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output);
u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo);
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state);
void vc4_hvs_dump_state(struct vc4_hvs *hvs);
void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel);
void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel);
int vc4_hvs_debugfs_init(struct drm_minor *minor);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type,
				 uint32_t possible_crtcs);
int vc4_plane_create_additional_planes(struct drm_device *dev);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
extern const struct of_device_id vc4_v3d_dt_match[];
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used);
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4);
int vc4_v3d_pm_get(struct vc4_dev *vc4);
void vc4_v3d_pm_put(struct vc4_dev *vc4);
int vc4_v3d_debugfs_init(struct drm_minor *minor);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_dma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_dma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_dma_object *shader_obj);

/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
void vc4_perfmon_put(struct vc4_perfmon *perfmon);
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
		      bool capture);
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
void vc4_perfmon_open_file(struct vc4_file *vc4file);
void vc4_perfmon_close_file(struct vc4_file *vc4file);
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);

#endif /* _VC4_DRV_H_ */