/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include "drmP.h"
#include "drm_gem_cma_helper.h"

struct vc4_dev {
	struct drm_device *dev;

	struct vc4_hdmi *hdmi;
	struct vc4_hvs *hvs;
	struct vc4_crtc *crtc[3];
	struct vc4_v3d *v3d;

	struct drm_fbdev_cma *fbdev;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache.  Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;

	struct vc4_bo_stats {
		u32 num_allocated;
		u32 size_allocated;
		u32 num_cached;
		u32 size_cached;
	} bo_stats;

	/* Protects bo_cache and the BO stats. */
	struct mutex bo_lock;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;

	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner.  The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering.  The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;
	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The binner overflow memory that's currently set up in
	 * BPOA/BPOS registers.  When overflow occurs and a new one is
	 * allocated, the previous one will be moved to
	 * vc4->current_exec's free list.
	 */
	struct vc4_bo *overflow_mem;
	struct work_struct overflow_mem_work;

	int power_refcount;

	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct semaphore async_modeset;
};

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return (struct vc4_dev *)dev->dev_private;
}
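
/*
 * Illustrative sketch, not part of the driver: emit_seqno and
 * finished_seqno above describe GPU progress, so "every submitted job
 * has completed" is simply finished_seqno catching up to emit_seqno.
 * The helper name below is hypothetical; the real consumers of these
 * fields live in vc4_gem.c and vc4_irq.c.
 */
static inline bool
vc4_all_jobs_idle_sketch(struct vc4_dev *vc4)
{
	unsigned long irqflags;
	bool idle;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	idle = (vc4->finished_seqno == vc4->emit_seqno);
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return idle;
}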

struct vc4_bo {
	struct drm_gem_cma_object base;

	/* seqno of the last job to render to this BO. */
	uint64_t seqno;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;
};

static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return (struct vc4_bo *)bo;
}

struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};

struct vc4_v3d {
	struct vc4_dev *vc4;
	struct platform_device *pdev;
	void __iomem *regs;
};

struct vc4_hvs {
	struct platform_device *pdev;
	void __iomem *regs;
	u32 __iomem *dlist;

	/* Memory manager for CRTCs to allocate space in the display
	 * list.  Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	spinlock_t mm_lock;

	struct drm_mm_node mitchell_netravali_filter;
};

struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return (struct vc4_plane *)plane;
}

enum vc4_encoder_type {
	VC4_ENCODER_TYPE_HDMI,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}

#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
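
/*
 * Illustrative sketch, not part of the driver: the register accessor
 * macros above are not hygienic -- they expect a local variable named
 * "vc4" to be in scope at the call site.  A hypothetical helper making
 * that dependency explicit:
 */
static inline u32 vc4_v3d_read_sketch(struct vc4_dev *vc4, u32 offset)
{
	return V3D_READ(offset);
}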

struct vc4_exec_info {
	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Last current addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_cma_object **bo;
	uint32_t bo_count;

	/* List entry for our position in vc4->bin_job_list,
	 * render_job_list, or job_done_list.
	 */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_cma_object *exec_bo;

	/**
	 * This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at.  It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	struct drm_gem_cma_object *tile_bo;
	uint32_t tile_alloc_offset;

	/**
	 * Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;

	/* Pointers to the shader recs.  The paddr gets incremented as CL
	 * packets are relocated in validate_gl_shader_state, and the vaddrs
	 * (u and v) get incremented and the size decremented as the shader
	 * recs themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data.  These pointers are incremented, and
	 * the size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;
};

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->bin_job_list))
		return NULL;
	return list_first_entry(&vc4->bin_job_list, struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_first_entry(&vc4->render_job_list,
				struct vc4_exec_info, head);
}
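
/*
 * Illustrative sketch, not the driver's implementation: once the binner
 * finishes the head job on bin_job_list, that job is moved to the tail
 * of render_job_list under job_lock.  The real logic lives in
 * vc4_move_job_to_render() in vc4_gem.c; this helper is only a rough,
 * hypothetical outline of that list handoff.
 */
static inline void
vc4_move_head_bin_job_sketch(struct vc4_dev *vc4)
{
	struct vc4_exec_info *exec;
	unsigned long irqflags;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	exec = vc4_first_bin_job(vc4);
	if (exec)
		list_move_tail(&exec->head, &vc4->render_job_list);
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}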

/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;
};

/**
 * _wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar
 * atomic contexts.  Note that it's important that we check the condition
 * again after having timed out, since the timeout could be due to preemption
 * or similar and we've never had a chance to check the condition before the
 * timeout.
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			if (!(COND))					\
				ret__ = -ETIMEDOUT;			\
			break;						\
		}							\
		if (W && drm_can_sleep()) {				\
			msleep(W);					\
		} else {						\
			cpu_relax();					\
		}							\
	}								\
	ret__;								\
})

#define wait_for(COND, MS) _wait_for(COND, MS, 1)
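
/*
 * Illustrative sketch, not part of the driver: wait_for() polls a
 * condition with a timeout in milliseconds, sleeping 1ms between checks
 * when the context allows it and returning 0 or -ETIMEDOUT.  A
 * hypothetical caller waiting for a "done" bit in a register might look
 * like this; the BIT(0) mask and the 1000ms timeout are made up for the
 * example.
 */
static inline int
vc4_wait_for_done_bit_sketch(struct vc4_dev *vc4, u32 offset)
{
	return wait_for(V3D_READ(offset) & BIT(0), 1000);
}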

/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache);
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
struct dma_buf *vc4_prime_export(struct drm_device *dev,
				 struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
void *vc4_prime_vmap(struct drm_gem_object *obj);
void vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);

/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);
void vc4_debugfs_cleanup(struct drm_minor *minor);

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);

/* vc4_gem.c */
void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
int vc4_irq_postinstall(struct drm_device *dev);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_cma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);
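
/*
 * Illustrative sketch, not part of the driver: struct vc4_seqno_cb
 * (declared above) is meant to be embedded in a caller's own structure
 * and registered with vc4_queue_seqno_cb(); the callback then runs from
 * a workqueue once the given seqno has passed.  The wrapper struct and
 * both function names below are hypothetical.
 */
struct vc4_sketch_waiter {
	struct vc4_seqno_cb cb;
	bool fired;
};

static inline void vc4_sketch_seqno_fired(struct vc4_seqno_cb *cb)
{
	struct vc4_sketch_waiter *waiter =
		container_of(cb, struct vc4_sketch_waiter, cb);

	waiter->fired = true;
}

static inline int vc4_sketch_queue_waiter(struct drm_device *dev,
					  struct vc4_sketch_waiter *waiter,
					  uint64_t seqno)
{
	waiter->fired = false;
	return vc4_queue_seqno_cb(dev, &waiter->cb, seqno,
				  vc4_sketch_seqno_fired);
}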