/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
#include <linux/fence.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gds.h"
#include "amd_powerplay.h"
#include "amdgpu_acp.h"

#include "gpu_scheduler.h"

/*
 * Module parameters.
62 */ 63 extern int amdgpu_modeset; 64 extern int amdgpu_vram_limit; 65 extern int amdgpu_gart_size; 66 extern int amdgpu_benchmarking; 67 extern int amdgpu_testing; 68 extern int amdgpu_audio; 69 extern int amdgpu_disp_priority; 70 extern int amdgpu_hw_i2c; 71 extern int amdgpu_pcie_gen2; 72 extern int amdgpu_msi; 73 extern int amdgpu_lockup_timeout; 74 extern int amdgpu_dpm; 75 extern int amdgpu_smc_load_fw; 76 extern int amdgpu_aspm; 77 extern int amdgpu_runtime_pm; 78 extern unsigned amdgpu_ip_block_mask; 79 extern int amdgpu_bapm; 80 extern int amdgpu_deep_color; 81 extern int amdgpu_vm_size; 82 extern int amdgpu_vm_block_size; 83 extern int amdgpu_vm_fault_stop; 84 extern int amdgpu_vm_debug; 85 extern int amdgpu_sched_jobs; 86 extern int amdgpu_sched_hw_submission; 87 extern int amdgpu_powerplay; 88 extern int amdgpu_powercontainment; 89 extern unsigned amdgpu_pcie_gen_cap; 90 extern unsigned amdgpu_pcie_lane_cap; 91 extern unsigned amdgpu_cg_mask; 92 extern unsigned amdgpu_pg_mask; 93 extern char *amdgpu_disable_cu; 94 95 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 96 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 97 #define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2) 98 /* AMDGPU_IB_POOL_SIZE must be a power of 2 */ 99 #define AMDGPU_IB_POOL_SIZE 16 100 #define AMDGPU_DEBUGFS_MAX_COMPONENTS 32 101 #define AMDGPUFB_CONN_LIMIT 4 102 #define AMDGPU_BIOS_NUM_SCRATCH 8 103 104 /* max number of rings */ 105 #define AMDGPU_MAX_RINGS 16 106 #define AMDGPU_MAX_GFX_RINGS 1 107 #define AMDGPU_MAX_COMPUTE_RINGS 8 108 #define AMDGPU_MAX_VCE_RINGS 2 109 110 /* max number of IP instances */ 111 #define AMDGPU_MAX_SDMA_INSTANCES 2 112 113 /* hardcode that limit for now */ 114 #define AMDGPU_VA_RESERVED_SIZE (8 << 20) 115 116 /* hard reset data */ 117 #define AMDGPU_ASIC_RESET_DATA 0x39d5e86b 118 119 /* reset flags */ 120 #define AMDGPU_RESET_GFX (1 << 0) 121 #define AMDGPU_RESET_COMPUTE (1 << 1) 122 #define AMDGPU_RESET_DMA (1 << 2) 123 #define AMDGPU_RESET_CP (1 << 3) 124 #define AMDGPU_RESET_GRBM (1 << 4) 125 #define AMDGPU_RESET_DMA1 (1 << 5) 126 #define AMDGPU_RESET_RLC (1 << 6) 127 #define AMDGPU_RESET_SEM (1 << 7) 128 #define AMDGPU_RESET_IH (1 << 8) 129 #define AMDGPU_RESET_VMC (1 << 9) 130 #define AMDGPU_RESET_MC (1 << 10) 131 #define AMDGPU_RESET_DISPLAY (1 << 11) 132 #define AMDGPU_RESET_UVD (1 << 12) 133 #define AMDGPU_RESET_VCE (1 << 13) 134 #define AMDGPU_RESET_VCE1 (1 << 14) 135 136 /* GFX current status */ 137 #define AMDGPU_GFX_NORMAL_MODE 0x00000000L 138 #define AMDGPU_GFX_SAFE_MODE 0x00000001L 139 #define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L 140 #define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L 141 #define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L 142 143 /* max cursor sizes (in pixels) */ 144 #define CIK_CURSOR_WIDTH 128 145 #define CIK_CURSOR_HEIGHT 128 146 147 struct amdgpu_device; 148 struct amdgpu_ib; 149 struct amdgpu_vm; 150 struct amdgpu_ring; 151 struct amdgpu_cs_parser; 152 struct amdgpu_job; 153 struct amdgpu_irq_src; 154 struct amdgpu_fpriv; 155 156 enum amdgpu_cp_irq { 157 AMDGPU_CP_IRQ_GFX_EOP = 0, 158 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP, 159 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP, 160 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP, 161 AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP, 162 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP, 163 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP, 164 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP, 165 AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP, 166 167 AMDGPU_CP_IRQ_LAST 168 }; 169 170 enum amdgpu_sdma_irq { 171 AMDGPU_SDMA_IRQ_TRAP0 = 0, 172 AMDGPU_SDMA_IRQ_TRAP1, 173 174 
	AMDGPU_SDMA_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state);
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state);
int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type);
bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type);

struct amdgpu_ip_block_version {
	enum amd_ip_block_type type;
	u32 major;
	u32 minor;
	u32 rev;
	const struct amd_ip_funcs *funcs;
};

int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor);

const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
					struct amdgpu_device *adev,
					enum amd_ip_block_type type);

/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t copy_max_bytes;

	/* number of dw to reserve per operation */
	unsigned copy_num_dw;

	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to transfer */
				 uint32_t byte_count);

	/* maximum bytes in a single operation */
	uint32_t fill_max_bytes;

	/* number of dw to reserve per operation */
	unsigned fill_num_dw;

	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to fill */
				 uint32_t byte_count);
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);
	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib,
			  const dma_addr_t *pages_addr, uint64_t pe,
			  uint64_t addr, unsigned count,
			  uint32_t incr, uint32_t flags);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint32_t flags);
};

/* provided by the gmc block */
struct amdgpu_gart_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
			      uint32_t vmid);
	/* write pte/pde updates using the cpu */
	int (*set_pte_pde)(struct amdgpu_device *adev,
			   void *cpu_pt_addr, /* cpu addr of page table */
			   uint32_t gpu_page_idx, /* pte/pde to update */
			   uint64_t addr, /* addr to write into pte/pde */
			   uint32_t flags); /* access flags */
};

/* provided by the ih block */
struct amdgpu_ih_funcs {
	/* ring read/write ptr handling, called from interrupt context */
	u32 (*get_wptr)(struct amdgpu_device *adev);
	void (*decode_iv)(struct amdgpu_device *adev,
			  struct amdgpu_iv_entry *entry);
	void (*set_rptr)(struct amdgpu_device *adev);
};

/* provided by hw blocks that expose a ring buffer for
commands */ 285 struct amdgpu_ring_funcs { 286 /* ring read/write ptr handling */ 287 u32 (*get_rptr)(struct amdgpu_ring *ring); 288 u32 (*get_wptr)(struct amdgpu_ring *ring); 289 void (*set_wptr)(struct amdgpu_ring *ring); 290 /* validating and patching of IBs */ 291 int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx); 292 /* command emit functions */ 293 void (*emit_ib)(struct amdgpu_ring *ring, 294 struct amdgpu_ib *ib, 295 unsigned vm_id, bool ctx_switch); 296 void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, 297 uint64_t seq, unsigned flags); 298 void (*emit_pipeline_sync)(struct amdgpu_ring *ring); 299 void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id, 300 uint64_t pd_addr); 301 void (*emit_hdp_flush)(struct amdgpu_ring *ring); 302 void (*emit_hdp_invalidate)(struct amdgpu_ring *ring); 303 void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid, 304 uint32_t gds_base, uint32_t gds_size, 305 uint32_t gws_base, uint32_t gws_size, 306 uint32_t oa_base, uint32_t oa_size); 307 /* testing functions */ 308 int (*test_ring)(struct amdgpu_ring *ring); 309 int (*test_ib)(struct amdgpu_ring *ring, long timeout); 310 /* insert NOP packets */ 311 void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count); 312 /* pad the indirect buffer to the necessary number of dw */ 313 void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib); 314 unsigned (*init_cond_exec)(struct amdgpu_ring *ring); 315 void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset); 316 /* note usage for clock and power gating */ 317 void (*begin_use)(struct amdgpu_ring *ring); 318 void (*end_use)(struct amdgpu_ring *ring); 319 }; 320 321 /* 322 * BIOS. 323 */ 324 bool amdgpu_get_bios(struct amdgpu_device *adev); 325 bool amdgpu_read_bios(struct amdgpu_device *adev); 326 327 /* 328 * Dummy page 329 */ 330 struct amdgpu_dummy_page { 331 struct page *page; 332 dma_addr_t addr; 333 }; 334 int amdgpu_dummy_page_init(struct amdgpu_device *adev); 335 void amdgpu_dummy_page_fini(struct amdgpu_device *adev); 336 337 338 /* 339 * Clocks 340 */ 341 342 #define AMDGPU_MAX_PPLL 3 343 344 struct amdgpu_clock { 345 struct amdgpu_pll ppll[AMDGPU_MAX_PPLL]; 346 struct amdgpu_pll spll; 347 struct amdgpu_pll mpll; 348 /* 10 Khz units */ 349 uint32_t default_mclk; 350 uint32_t default_sclk; 351 uint32_t default_dispclk; 352 uint32_t current_dispclk; 353 uint32_t dp_extclk; 354 uint32_t max_pixel_clock; 355 }; 356 357 /* 358 * Fences. 
359 */ 360 struct amdgpu_fence_driver { 361 uint64_t gpu_addr; 362 volatile uint32_t *cpu_addr; 363 /* sync_seq is protected by ring emission lock */ 364 uint32_t sync_seq; 365 atomic_t last_seq; 366 bool initialized; 367 struct amdgpu_irq_src *irq_src; 368 unsigned irq_type; 369 struct timer_list fallback_timer; 370 unsigned num_fences_mask; 371 spinlock_t lock; 372 struct fence **fences; 373 }; 374 375 /* some special values for the owner field */ 376 #define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul) 377 #define AMDGPU_FENCE_OWNER_VM ((void*)1ul) 378 379 #define AMDGPU_FENCE_FLAG_64BIT (1 << 0) 380 #define AMDGPU_FENCE_FLAG_INT (1 << 1) 381 382 int amdgpu_fence_driver_init(struct amdgpu_device *adev); 383 void amdgpu_fence_driver_fini(struct amdgpu_device *adev); 384 void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev); 385 386 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, 387 unsigned num_hw_submission); 388 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, 389 struct amdgpu_irq_src *irq_src, 390 unsigned irq_type); 391 void amdgpu_fence_driver_suspend(struct amdgpu_device *adev); 392 void amdgpu_fence_driver_resume(struct amdgpu_device *adev); 393 int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence); 394 void amdgpu_fence_process(struct amdgpu_ring *ring); 395 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); 396 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); 397 398 /* 399 * TTM. 400 */ 401 402 #define AMDGPU_TTM_LRU_SIZE 20 403 404 struct amdgpu_mman_lru { 405 struct list_head *lru[TTM_NUM_MEM_TYPES]; 406 struct list_head *swap_lru; 407 }; 408 409 struct amdgpu_mman { 410 struct ttm_bo_global_ref bo_global_ref; 411 struct drm_global_reference mem_global_ref; 412 struct ttm_bo_device bdev; 413 bool mem_global_referenced; 414 bool initialized; 415 416 #if defined(CONFIG_DEBUG_FS) 417 struct dentry *vram; 418 struct dentry *gtt; 419 #endif 420 421 /* buffer handling */ 422 const struct amdgpu_buffer_funcs *buffer_funcs; 423 struct amdgpu_ring *buffer_funcs_ring; 424 /* Scheduler entity for buffer moves */ 425 struct amd_sched_entity entity; 426 427 /* custom LRU management */ 428 struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE]; 429 }; 430 431 int amdgpu_copy_buffer(struct amdgpu_ring *ring, 432 uint64_t src_offset, 433 uint64_t dst_offset, 434 uint32_t byte_count, 435 struct reservation_object *resv, 436 struct fence **fence); 437 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); 438 439 struct amdgpu_bo_list_entry { 440 struct amdgpu_bo *robj; 441 struct ttm_validate_buffer tv; 442 struct amdgpu_bo_va *bo_va; 443 uint32_t priority; 444 struct page **user_pages; 445 int user_invalidated; 446 }; 447 448 struct amdgpu_bo_va_mapping { 449 struct list_head list; 450 struct interval_tree_node it; 451 uint64_t offset; 452 uint32_t flags; 453 }; 454 455 /* bo virtual addresses in a specific vm */ 456 struct amdgpu_bo_va { 457 /* protected by bo being reserved */ 458 struct list_head bo_list; 459 struct fence *last_pt_update; 460 unsigned ref_count; 461 462 /* protected by vm mutex and spinlock */ 463 struct list_head vm_status; 464 465 /* mappings for this bo_va */ 466 struct list_head invalids; 467 struct list_head valids; 468 469 /* constant after initialization */ 470 struct amdgpu_vm *vm; 471 struct amdgpu_bo *bo; 472 }; 473 474 #define AMDGPU_GEM_DOMAIN_MAX 0x3 475 476 struct amdgpu_bo { 477 /* Protected by gem.mutex */ 478 struct list_head list; 479 /* Protected by tbo.reserved */ 480 u32 
prefered_domains;
	u32 allowed_domains;
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	struct ttm_placement placement;
	struct ttm_buffer_object tbo;
	struct ttm_bo_kmap_obj kmap;
	u64 flags;
	unsigned pin_count;
	void *kptr;
	u64 tiling_flags;
	u64 metadata_flags;
	void *metadata;
	u32 metadata_size;
	/* list of all virtual addresses this bo is associated with */
	struct list_head va;
	/* Constant after initialization */
	struct amdgpu_device *adev;
	struct drm_gem_object gem_base;
	struct amdgpu_bo *parent;

	struct ttm_bo_kmap_obj dma_buf_vmap;
	struct amdgpu_mn *mn;
	struct list_head mn_list;
};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)

void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags);
int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);

/* Sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver,
 * like the indirect buffers or semaphores, which both have
 * their own locking.
 *
 * The principle is simple: we keep a list of sub-allocations in
 * offset order (the first entry has offset == 0, the last entry
 * has the highest offset).
 *
 * When allocating a new object we first check if there is room
 * at the end: total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so, we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub-object until we reach object_offset + object_size >=
 * alloc_size; that object then becomes the sub-object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be any holes (all objects are
 * on the same alignment).
 */

#define AMDGPU_SA_NUM_FENCE_LISTS	32

struct amdgpu_sa_manager {
	wait_queue_head_t wq;
	struct amdgpu_bo *bo;
	struct list_head *hole;
	struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS];
	struct list_head olist;
	unsigned size;
	uint64_t gpu_addr;
	void *cpu_ptr;
	uint32_t domain;
	uint32_t align;
};

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head olist;
	struct list_head flist;
	struct amdgpu_sa_manager *manager;
	unsigned soffset;
	unsigned eoffset;
	struct fence *fence;
};

/*
 * GEM objects.
580 */ 581 void amdgpu_gem_force_release(struct amdgpu_device *adev); 582 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, 583 int alignment, u32 initial_domain, 584 u64 flags, bool kernel, 585 struct drm_gem_object **obj); 586 587 int amdgpu_mode_dumb_create(struct drm_file *file_priv, 588 struct drm_device *dev, 589 struct drm_mode_create_dumb *args); 590 int amdgpu_mode_dumb_mmap(struct drm_file *filp, 591 struct drm_device *dev, 592 uint32_t handle, uint64_t *offset_p); 593 /* 594 * Synchronization 595 */ 596 struct amdgpu_sync { 597 DECLARE_HASHTABLE(fences, 4); 598 struct fence *last_vm_update; 599 }; 600 601 void amdgpu_sync_create(struct amdgpu_sync *sync); 602 int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, 603 struct fence *f); 604 int amdgpu_sync_resv(struct amdgpu_device *adev, 605 struct amdgpu_sync *sync, 606 struct reservation_object *resv, 607 void *owner); 608 struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, 609 struct amdgpu_ring *ring); 610 struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); 611 void amdgpu_sync_free(struct amdgpu_sync *sync); 612 int amdgpu_sync_init(void); 613 void amdgpu_sync_fini(void); 614 int amdgpu_fence_slab_init(void); 615 void amdgpu_fence_slab_fini(void); 616 617 /* 618 * GART structures, functions & helpers 619 */ 620 struct amdgpu_mc; 621 622 #define AMDGPU_GPU_PAGE_SIZE 4096 623 #define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1) 624 #define AMDGPU_GPU_PAGE_SHIFT 12 625 #define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK) 626 627 struct amdgpu_gart { 628 dma_addr_t table_addr; 629 struct amdgpu_bo *robj; 630 void *ptr; 631 unsigned num_gpu_pages; 632 unsigned num_cpu_pages; 633 unsigned table_size; 634 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS 635 struct page **pages; 636 #endif 637 bool ready; 638 const struct amdgpu_gart_funcs *gart_funcs; 639 }; 640 641 int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev); 642 void amdgpu_gart_table_ram_free(struct amdgpu_device *adev); 643 int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev); 644 void amdgpu_gart_table_vram_free(struct amdgpu_device *adev); 645 int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev); 646 void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev); 647 int amdgpu_gart_init(struct amdgpu_device *adev); 648 void amdgpu_gart_fini(struct amdgpu_device *adev); 649 void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset, 650 int pages); 651 int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset, 652 int pages, struct page **pagelist, 653 dma_addr_t *dma_addr, uint32_t flags); 654 655 /* 656 * GPU MC structures, functions & helpers 657 */ 658 struct amdgpu_mc { 659 resource_size_t aper_size; 660 resource_size_t aper_base; 661 resource_size_t agp_base; 662 /* for some chips with <= 32MB we need to lie 663 * about vram size near mc fb location */ 664 u64 mc_vram_size; 665 u64 visible_vram_size; 666 u64 gtt_size; 667 u64 gtt_start; 668 u64 gtt_end; 669 u64 vram_start; 670 u64 vram_end; 671 unsigned vram_width; 672 u64 real_vram_size; 673 int vram_mtrr; 674 u64 gtt_base_align; 675 u64 mc_mask; 676 const struct firmware *fw; /* MC firmware */ 677 uint32_t fw_version; 678 struct amdgpu_irq_src vm_fault; 679 uint32_t vram_type; 680 }; 681 682 /* 683 * GPU doorbell structures, functions & helpers 684 */ 685 typedef enum _AMDGPU_DOORBELL_ASSIGNMENT 686 { 687 AMDGPU_DOORBELL_KIQ = 0x000, 688 AMDGPU_DOORBELL_HIQ = 0x001, 689 
AMDGPU_DOORBELL_DIQ = 0x002, 690 AMDGPU_DOORBELL_MEC_RING0 = 0x010, 691 AMDGPU_DOORBELL_MEC_RING1 = 0x011, 692 AMDGPU_DOORBELL_MEC_RING2 = 0x012, 693 AMDGPU_DOORBELL_MEC_RING3 = 0x013, 694 AMDGPU_DOORBELL_MEC_RING4 = 0x014, 695 AMDGPU_DOORBELL_MEC_RING5 = 0x015, 696 AMDGPU_DOORBELL_MEC_RING6 = 0x016, 697 AMDGPU_DOORBELL_MEC_RING7 = 0x017, 698 AMDGPU_DOORBELL_GFX_RING0 = 0x020, 699 AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0, 700 AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1, 701 AMDGPU_DOORBELL_IH = 0x1E8, 702 AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF, 703 AMDGPU_DOORBELL_INVALID = 0xFFFF 704 } AMDGPU_DOORBELL_ASSIGNMENT; 705 706 struct amdgpu_doorbell { 707 /* doorbell mmio */ 708 resource_size_t base; 709 resource_size_t size; 710 u32 __iomem *ptr; 711 u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */ 712 }; 713 714 void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, 715 phys_addr_t *aperture_base, 716 size_t *aperture_size, 717 size_t *start_offset); 718 719 /* 720 * IRQS. 721 */ 722 723 struct amdgpu_flip_work { 724 struct work_struct flip_work; 725 struct work_struct unpin_work; 726 struct amdgpu_device *adev; 727 int crtc_id; 728 uint64_t base; 729 struct drm_pending_vblank_event *event; 730 struct amdgpu_bo *old_rbo; 731 struct fence *excl; 732 unsigned shared_count; 733 struct fence **shared; 734 struct fence_cb cb; 735 bool async; 736 }; 737 738 739 /* 740 * CP & rings. 741 */ 742 743 struct amdgpu_ib { 744 struct amdgpu_sa_bo *sa_bo; 745 uint32_t length_dw; 746 uint64_t gpu_addr; 747 uint32_t *ptr; 748 uint32_t flags; 749 }; 750 751 enum amdgpu_ring_type { 752 AMDGPU_RING_TYPE_GFX, 753 AMDGPU_RING_TYPE_COMPUTE, 754 AMDGPU_RING_TYPE_SDMA, 755 AMDGPU_RING_TYPE_UVD, 756 AMDGPU_RING_TYPE_VCE 757 }; 758 759 extern const struct amd_sched_backend_ops amdgpu_sched_ops; 760 761 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, 762 struct amdgpu_job **job, struct amdgpu_vm *vm); 763 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, 764 struct amdgpu_job **job); 765 766 void amdgpu_job_free_resources(struct amdgpu_job *job); 767 void amdgpu_job_free(struct amdgpu_job *job); 768 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, 769 struct amd_sched_entity *entity, void *owner, 770 struct fence **f); 771 772 struct amdgpu_ring { 773 struct amdgpu_device *adev; 774 const struct amdgpu_ring_funcs *funcs; 775 struct amdgpu_fence_driver fence_drv; 776 struct amd_gpu_scheduler sched; 777 778 struct amdgpu_bo *ring_obj; 779 volatile uint32_t *ring; 780 unsigned rptr_offs; 781 unsigned wptr; 782 unsigned wptr_old; 783 unsigned ring_size; 784 unsigned max_dw; 785 int count_dw; 786 uint64_t gpu_addr; 787 uint32_t align_mask; 788 uint32_t ptr_mask; 789 bool ready; 790 u32 nop; 791 u32 idx; 792 u32 me; 793 u32 pipe; 794 u32 queue; 795 struct amdgpu_bo *mqd_obj; 796 u32 doorbell_index; 797 bool use_doorbell; 798 unsigned wptr_offs; 799 unsigned fence_offs; 800 uint64_t current_ctx; 801 enum amdgpu_ring_type type; 802 char name[16]; 803 unsigned cond_exe_offs; 804 u64 cond_exe_gpu_addr; 805 volatile u32 *cond_exe_cpu_addr; 806 #if defined(CONFIG_DEBUG_FS) 807 struct dentry *ent; 808 #endif 809 }; 810 811 /* 812 * VM 813 */ 814 815 /* maximum number of VMIDs */ 816 #define AMDGPU_NUM_VM 16 817 818 /* number of entries in page table */ 819 #define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size) 820 821 /* PTBs (Page Table Blocks) need to be aligned to 32K */ 822 #define AMDGPU_VM_PTB_ALIGN_SIZE 32768 823 #define 
AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1) 824 #define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK) 825 826 #define AMDGPU_PTE_VALID (1 << 0) 827 #define AMDGPU_PTE_SYSTEM (1 << 1) 828 #define AMDGPU_PTE_SNOOPED (1 << 2) 829 830 /* VI only */ 831 #define AMDGPU_PTE_EXECUTABLE (1 << 4) 832 833 #define AMDGPU_PTE_READABLE (1 << 5) 834 #define AMDGPU_PTE_WRITEABLE (1 << 6) 835 836 /* PTE (Page Table Entry) fragment field for different page sizes */ 837 #define AMDGPU_PTE_FRAG_4KB (0 << 7) 838 #define AMDGPU_PTE_FRAG_64KB (4 << 7) 839 #define AMDGPU_LOG2_PAGES_PER_FRAG 4 840 841 /* How to programm VM fault handling */ 842 #define AMDGPU_VM_FAULT_STOP_NEVER 0 843 #define AMDGPU_VM_FAULT_STOP_FIRST 1 844 #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 845 846 struct amdgpu_vm_pt { 847 struct amdgpu_bo_list_entry entry; 848 uint64_t addr; 849 }; 850 851 struct amdgpu_vm { 852 /* tree of virtual addresses mapped */ 853 struct rb_root va; 854 855 /* protecting invalidated */ 856 spinlock_t status_lock; 857 858 /* BOs moved, but not yet updated in the PT */ 859 struct list_head invalidated; 860 861 /* BOs cleared in the PT because of a move */ 862 struct list_head cleared; 863 864 /* BO mappings freed, but not yet updated in the PT */ 865 struct list_head freed; 866 867 /* contains the page directory */ 868 struct amdgpu_bo *page_directory; 869 unsigned max_pde_used; 870 struct fence *page_directory_fence; 871 uint64_t last_eviction_counter; 872 873 /* array of page tables, one for each page directory entry */ 874 struct amdgpu_vm_pt *page_tables; 875 876 /* for id and flush management per ring */ 877 struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS]; 878 879 /* protecting freed */ 880 spinlock_t freed_lock; 881 882 /* Scheduler entity for page table updates */ 883 struct amd_sched_entity entity; 884 885 /* client id */ 886 u64 client_id; 887 }; 888 889 struct amdgpu_vm_id { 890 struct list_head list; 891 struct fence *first; 892 struct amdgpu_sync active; 893 struct fence *last_flush; 894 atomic64_t owner; 895 896 uint64_t pd_gpu_addr; 897 /* last flushed PD/PT update */ 898 struct fence *flushed_updates; 899 900 uint32_t current_gpu_reset_count; 901 902 uint32_t gds_base; 903 uint32_t gds_size; 904 uint32_t gws_base; 905 uint32_t gws_size; 906 uint32_t oa_base; 907 uint32_t oa_size; 908 }; 909 910 struct amdgpu_vm_manager { 911 /* Handling of VMIDs */ 912 struct mutex lock; 913 unsigned num_ids; 914 struct list_head ids_lru; 915 struct amdgpu_vm_id ids[AMDGPU_NUM_VM]; 916 917 /* Handling of VM fences */ 918 u64 fence_context; 919 unsigned seqno[AMDGPU_MAX_RINGS]; 920 921 uint32_t max_pfn; 922 /* vram base address for page table entry */ 923 u64 vram_base_offset; 924 /* is vm enabled? 
*/ 925 bool enabled; 926 /* vm pte handling */ 927 const struct amdgpu_vm_pte_funcs *vm_pte_funcs; 928 struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS]; 929 unsigned vm_pte_num_rings; 930 atomic_t vm_pte_next_ring; 931 /* client id counter */ 932 atomic64_t client_counter; 933 }; 934 935 void amdgpu_vm_manager_init(struct amdgpu_device *adev); 936 void amdgpu_vm_manager_fini(struct amdgpu_device *adev); 937 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); 938 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); 939 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, 940 struct list_head *validated, 941 struct amdgpu_bo_list_entry *entry); 942 void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, 943 struct list_head *duplicates); 944 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, 945 struct amdgpu_vm *vm); 946 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 947 struct amdgpu_sync *sync, struct fence *fence, 948 struct amdgpu_job *job); 949 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); 950 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); 951 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); 952 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, 953 struct amdgpu_vm *vm); 954 int amdgpu_vm_clear_freed(struct amdgpu_device *adev, 955 struct amdgpu_vm *vm); 956 int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, 957 struct amdgpu_sync *sync); 958 int amdgpu_vm_bo_update(struct amdgpu_device *adev, 959 struct amdgpu_bo_va *bo_va, 960 struct ttm_mem_reg *mem); 961 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, 962 struct amdgpu_bo *bo); 963 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, 964 struct amdgpu_bo *bo); 965 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, 966 struct amdgpu_vm *vm, 967 struct amdgpu_bo *bo); 968 int amdgpu_vm_bo_map(struct amdgpu_device *adev, 969 struct amdgpu_bo_va *bo_va, 970 uint64_t addr, uint64_t offset, 971 uint64_t size, uint32_t flags); 972 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, 973 struct amdgpu_bo_va *bo_va, 974 uint64_t addr); 975 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, 976 struct amdgpu_bo_va *bo_va); 977 978 /* 979 * context related structures 980 */ 981 982 struct amdgpu_ctx_ring { 983 uint64_t sequence; 984 struct fence **fences; 985 struct amd_sched_entity entity; 986 }; 987 988 struct amdgpu_ctx { 989 struct kref refcount; 990 struct amdgpu_device *adev; 991 unsigned reset_counter; 992 spinlock_t ring_lock; 993 struct fence **fences; 994 struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; 995 }; 996 997 struct amdgpu_ctx_mgr { 998 struct amdgpu_device *adev; 999 struct mutex lock; 1000 /* protected by lock */ 1001 struct idr ctx_handles; 1002 }; 1003 1004 struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); 1005 int amdgpu_ctx_put(struct amdgpu_ctx *ctx); 1006 1007 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, 1008 struct fence *fence); 1009 struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, 1010 struct amdgpu_ring *ring, uint64_t seq); 1011 1012 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, 1013 struct drm_file *filp); 1014 1015 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr); 1016 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); 1017 1018 /* 1019 * file private structure 1020 */ 1021 1022 struct amdgpu_fpriv 
{ 1023 struct amdgpu_vm vm; 1024 struct mutex bo_list_lock; 1025 struct idr bo_list_handles; 1026 struct amdgpu_ctx_mgr ctx_mgr; 1027 }; 1028 1029 /* 1030 * residency list 1031 */ 1032 1033 struct amdgpu_bo_list { 1034 struct mutex lock; 1035 struct amdgpu_bo *gds_obj; 1036 struct amdgpu_bo *gws_obj; 1037 struct amdgpu_bo *oa_obj; 1038 unsigned first_userptr; 1039 unsigned num_entries; 1040 struct amdgpu_bo_list_entry *array; 1041 }; 1042 1043 struct amdgpu_bo_list * 1044 amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id); 1045 void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list, 1046 struct list_head *validated); 1047 void amdgpu_bo_list_put(struct amdgpu_bo_list *list); 1048 void amdgpu_bo_list_free(struct amdgpu_bo_list *list); 1049 1050 /* 1051 * GFX stuff 1052 */ 1053 #include "clearstate_defs.h" 1054 1055 struct amdgpu_rlc_funcs { 1056 void (*enter_safe_mode)(struct amdgpu_device *adev); 1057 void (*exit_safe_mode)(struct amdgpu_device *adev); 1058 }; 1059 1060 struct amdgpu_rlc { 1061 /* for power gating */ 1062 struct amdgpu_bo *save_restore_obj; 1063 uint64_t save_restore_gpu_addr; 1064 volatile uint32_t *sr_ptr; 1065 const u32 *reg_list; 1066 u32 reg_list_size; 1067 /* for clear state */ 1068 struct amdgpu_bo *clear_state_obj; 1069 uint64_t clear_state_gpu_addr; 1070 volatile uint32_t *cs_ptr; 1071 const struct cs_section_def *cs_data; 1072 u32 clear_state_size; 1073 /* for cp tables */ 1074 struct amdgpu_bo *cp_table_obj; 1075 uint64_t cp_table_gpu_addr; 1076 volatile uint32_t *cp_table_ptr; 1077 u32 cp_table_size; 1078 1079 /* safe mode for updating CG/PG state */ 1080 bool in_safe_mode; 1081 const struct amdgpu_rlc_funcs *funcs; 1082 1083 /* for firmware data */ 1084 u32 save_and_restore_offset; 1085 u32 clear_state_descriptor_offset; 1086 u32 avail_scratch_ram_locations; 1087 u32 reg_restore_list_size; 1088 u32 reg_list_format_start; 1089 u32 reg_list_format_separate_start; 1090 u32 starting_offsets_start; 1091 u32 reg_list_format_size_bytes; 1092 u32 reg_list_size_bytes; 1093 1094 u32 *register_list_format; 1095 u32 *register_restore; 1096 }; 1097 1098 struct amdgpu_mec { 1099 struct amdgpu_bo *hpd_eop_obj; 1100 u64 hpd_eop_gpu_addr; 1101 u32 num_pipe; 1102 u32 num_mec; 1103 u32 num_queue; 1104 }; 1105 1106 /* 1107 * GPU scratch registers structures, functions & helpers 1108 */ 1109 struct amdgpu_scratch { 1110 unsigned num_reg; 1111 uint32_t reg_base; 1112 bool free[32]; 1113 uint32_t reg[32]; 1114 }; 1115 1116 /* 1117 * GFX configurations 1118 */ 1119 struct amdgpu_gca_config { 1120 unsigned max_shader_engines; 1121 unsigned max_tile_pipes; 1122 unsigned max_cu_per_sh; 1123 unsigned max_sh_per_se; 1124 unsigned max_backends_per_se; 1125 unsigned max_texture_channel_caches; 1126 unsigned max_gprs; 1127 unsigned max_gs_threads; 1128 unsigned max_hw_contexts; 1129 unsigned sc_prim_fifo_size_frontend; 1130 unsigned sc_prim_fifo_size_backend; 1131 unsigned sc_hiz_tile_fifo_size; 1132 unsigned sc_earlyz_tile_fifo_size; 1133 1134 unsigned num_tile_pipes; 1135 unsigned backend_enable_mask; 1136 unsigned mem_max_burst_length_bytes; 1137 unsigned mem_row_size_in_kb; 1138 unsigned shader_engine_tile_size; 1139 unsigned num_gpus; 1140 unsigned multi_gpu_tile_size; 1141 unsigned mc_arb_ramcfg; 1142 unsigned gb_addr_config; 1143 unsigned num_rbs; 1144 1145 uint32_t tile_mode_array[32]; 1146 uint32_t macrotile_mode_array[16]; 1147 }; 1148 1149 struct amdgpu_cu_info { 1150 uint32_t number; /* total active CU number */ 1151 uint32_t ao_cu_mask; 1152 uint32_t bitmap[4][4]; 1153 }; 
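/*
 * Illustrative sketch only, not part of the driver interface: assuming
 * bitmap[se][sh] above holds one mask of active CUs per shader array of
 * each shader engine, the total active CU count could be derived roughly
 * like this (example_count_cu is a hypothetical name used only for the
 * example):
 *
 *	static unsigned example_count_cu(const struct amdgpu_cu_info *info)
 *	{
 *		unsigned se, sh, count = 0;
 *
 *		for (se = 0; se < 4; se++)
 *			for (sh = 0; sh < 4; sh++)
 *				count += hweight32(info->bitmap[se][sh]);
 *
 *		return count;
 *	}
 *
 * In practice the IP specific code fills in both number and bitmap when it
 * queries the harvesting configuration.
 */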
1154 1155 struct amdgpu_gfx_funcs { 1156 /* get the gpu clock counter */ 1157 uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); 1158 void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance); 1159 }; 1160 1161 struct amdgpu_gfx { 1162 struct mutex gpu_clock_mutex; 1163 struct amdgpu_gca_config config; 1164 struct amdgpu_rlc rlc; 1165 struct amdgpu_mec mec; 1166 struct amdgpu_scratch scratch; 1167 const struct firmware *me_fw; /* ME firmware */ 1168 uint32_t me_fw_version; 1169 const struct firmware *pfp_fw; /* PFP firmware */ 1170 uint32_t pfp_fw_version; 1171 const struct firmware *ce_fw; /* CE firmware */ 1172 uint32_t ce_fw_version; 1173 const struct firmware *rlc_fw; /* RLC firmware */ 1174 uint32_t rlc_fw_version; 1175 const struct firmware *mec_fw; /* MEC firmware */ 1176 uint32_t mec_fw_version; 1177 const struct firmware *mec2_fw; /* MEC2 firmware */ 1178 uint32_t mec2_fw_version; 1179 uint32_t me_feature_version; 1180 uint32_t ce_feature_version; 1181 uint32_t pfp_feature_version; 1182 uint32_t rlc_feature_version; 1183 uint32_t mec_feature_version; 1184 uint32_t mec2_feature_version; 1185 struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; 1186 unsigned num_gfx_rings; 1187 struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; 1188 unsigned num_compute_rings; 1189 struct amdgpu_irq_src eop_irq; 1190 struct amdgpu_irq_src priv_reg_irq; 1191 struct amdgpu_irq_src priv_inst_irq; 1192 /* gfx status */ 1193 uint32_t gfx_current_status; 1194 /* ce ram size*/ 1195 unsigned ce_ram_size; 1196 struct amdgpu_cu_info cu_info; 1197 const struct amdgpu_gfx_funcs *funcs; 1198 }; 1199 1200 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, 1201 unsigned size, struct amdgpu_ib *ib); 1202 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, 1203 struct fence *f); 1204 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, 1205 struct amdgpu_ib *ib, struct fence *last_vm_update, 1206 struct amdgpu_job *job, struct fence **f); 1207 int amdgpu_ib_pool_init(struct amdgpu_device *adev); 1208 void amdgpu_ib_pool_fini(struct amdgpu_device *adev); 1209 int amdgpu_ib_ring_tests(struct amdgpu_device *adev); 1210 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); 1211 void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); 1212 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); 1213 void amdgpu_ring_commit(struct amdgpu_ring *ring); 1214 void amdgpu_ring_undo(struct amdgpu_ring *ring); 1215 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, 1216 unsigned ring_size, u32 nop, u32 align_mask, 1217 struct amdgpu_irq_src *irq_src, unsigned irq_type, 1218 enum amdgpu_ring_type ring_type); 1219 void amdgpu_ring_fini(struct amdgpu_ring *ring); 1220 1221 /* 1222 * CS. 
1223 */ 1224 struct amdgpu_cs_chunk { 1225 uint32_t chunk_id; 1226 uint32_t length_dw; 1227 void *kdata; 1228 }; 1229 1230 struct amdgpu_cs_parser { 1231 struct amdgpu_device *adev; 1232 struct drm_file *filp; 1233 struct amdgpu_ctx *ctx; 1234 1235 /* chunks */ 1236 unsigned nchunks; 1237 struct amdgpu_cs_chunk *chunks; 1238 1239 /* scheduler job object */ 1240 struct amdgpu_job *job; 1241 1242 /* buffer objects */ 1243 struct ww_acquire_ctx ticket; 1244 struct amdgpu_bo_list *bo_list; 1245 struct amdgpu_bo_list_entry vm_pd; 1246 struct list_head validated; 1247 struct fence *fence; 1248 uint64_t bytes_moved_threshold; 1249 uint64_t bytes_moved; 1250 1251 /* user fence */ 1252 struct amdgpu_bo_list_entry uf_entry; 1253 }; 1254 1255 struct amdgpu_job { 1256 struct amd_sched_job base; 1257 struct amdgpu_device *adev; 1258 struct amdgpu_vm *vm; 1259 struct amdgpu_ring *ring; 1260 struct amdgpu_sync sync; 1261 struct amdgpu_ib *ibs; 1262 struct fence *fence; /* the hw fence */ 1263 uint32_t num_ibs; 1264 void *owner; 1265 uint64_t ctx; 1266 bool vm_needs_flush; 1267 unsigned vm_id; 1268 uint64_t vm_pd_addr; 1269 uint32_t gds_base, gds_size; 1270 uint32_t gws_base, gws_size; 1271 uint32_t oa_base, oa_size; 1272 1273 /* user fence handling */ 1274 uint64_t uf_addr; 1275 uint64_t uf_sequence; 1276 1277 }; 1278 #define to_amdgpu_job(sched_job) \ 1279 container_of((sched_job), struct amdgpu_job, base) 1280 1281 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, 1282 uint32_t ib_idx, int idx) 1283 { 1284 return p->job->ibs[ib_idx].ptr[idx]; 1285 } 1286 1287 static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p, 1288 uint32_t ib_idx, int idx, 1289 uint32_t value) 1290 { 1291 p->job->ibs[ib_idx].ptr[idx] = value; 1292 } 1293 1294 /* 1295 * Writeback 1296 */ 1297 #define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */ 1298 1299 struct amdgpu_wb { 1300 struct amdgpu_bo *wb_obj; 1301 volatile uint32_t *wb; 1302 uint64_t gpu_addr; 1303 u32 num_wb; /* Number of wb slots actually reserved for amdgpu. 
*/ 1304 unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)]; 1305 }; 1306 1307 int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb); 1308 void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb); 1309 1310 1311 1312 enum amdgpu_int_thermal_type { 1313 THERMAL_TYPE_NONE, 1314 THERMAL_TYPE_EXTERNAL, 1315 THERMAL_TYPE_EXTERNAL_GPIO, 1316 THERMAL_TYPE_RV6XX, 1317 THERMAL_TYPE_RV770, 1318 THERMAL_TYPE_ADT7473_WITH_INTERNAL, 1319 THERMAL_TYPE_EVERGREEN, 1320 THERMAL_TYPE_SUMO, 1321 THERMAL_TYPE_NI, 1322 THERMAL_TYPE_SI, 1323 THERMAL_TYPE_EMC2103_WITH_INTERNAL, 1324 THERMAL_TYPE_CI, 1325 THERMAL_TYPE_KV, 1326 }; 1327 1328 enum amdgpu_dpm_auto_throttle_src { 1329 AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, 1330 AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL 1331 }; 1332 1333 enum amdgpu_dpm_event_src { 1334 AMDGPU_DPM_EVENT_SRC_ANALOG = 0, 1335 AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1, 1336 AMDGPU_DPM_EVENT_SRC_DIGITAL = 2, 1337 AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, 1338 AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4 1339 }; 1340 1341 #define AMDGPU_MAX_VCE_LEVELS 6 1342 1343 enum amdgpu_vce_level { 1344 AMDGPU_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */ 1345 AMDGPU_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */ 1346 AMDGPU_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */ 1347 AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */ 1348 AMDGPU_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */ 1349 AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */ 1350 }; 1351 1352 struct amdgpu_ps { 1353 u32 caps; /* vbios flags */ 1354 u32 class; /* vbios flags */ 1355 u32 class2; /* vbios flags */ 1356 /* UVD clocks */ 1357 u32 vclk; 1358 u32 dclk; 1359 /* VCE clocks */ 1360 u32 evclk; 1361 u32 ecclk; 1362 bool vce_active; 1363 enum amdgpu_vce_level vce_level; 1364 /* asic priv */ 1365 void *ps_priv; 1366 }; 1367 1368 struct amdgpu_dpm_thermal { 1369 /* thermal interrupt work */ 1370 struct work_struct work; 1371 /* low temperature threshold */ 1372 int min_temp; 1373 /* high temperature threshold */ 1374 int max_temp; 1375 /* was last interrupt low to high or high to low */ 1376 bool high_to_low; 1377 /* interrupt source */ 1378 struct amdgpu_irq_src irq; 1379 }; 1380 1381 enum amdgpu_clk_action 1382 { 1383 AMDGPU_SCLK_UP = 1, 1384 AMDGPU_SCLK_DOWN 1385 }; 1386 1387 struct amdgpu_blacklist_clocks 1388 { 1389 u32 sclk; 1390 u32 mclk; 1391 enum amdgpu_clk_action action; 1392 }; 1393 1394 struct amdgpu_clock_and_voltage_limits { 1395 u32 sclk; 1396 u32 mclk; 1397 u16 vddc; 1398 u16 vddci; 1399 }; 1400 1401 struct amdgpu_clock_array { 1402 u32 count; 1403 u32 *values; 1404 }; 1405 1406 struct amdgpu_clock_voltage_dependency_entry { 1407 u32 clk; 1408 u16 v; 1409 }; 1410 1411 struct amdgpu_clock_voltage_dependency_table { 1412 u32 count; 1413 struct amdgpu_clock_voltage_dependency_entry *entries; 1414 }; 1415 1416 union amdgpu_cac_leakage_entry { 1417 struct { 1418 u16 vddc; 1419 u32 leakage; 1420 }; 1421 struct { 1422 u16 vddc1; 1423 u16 vddc2; 1424 u16 vddc3; 1425 }; 1426 }; 1427 1428 struct amdgpu_cac_leakage_table { 1429 u32 count; 1430 union amdgpu_cac_leakage_entry *entries; 1431 }; 1432 1433 struct amdgpu_phase_shedding_limits_entry { 1434 u16 voltage; 1435 u32 sclk; 1436 u32 mclk; 1437 }; 1438 1439 struct amdgpu_phase_shedding_limits_table { 1440 u32 count; 1441 struct amdgpu_phase_shedding_limits_entry *entries; 1442 }; 1443 1444 struct amdgpu_uvd_clock_voltage_dependency_entry { 1445 u32 vclk; 1446 u32 dclk; 1447 u16 v; 
1448 }; 1449 1450 struct amdgpu_uvd_clock_voltage_dependency_table { 1451 u8 count; 1452 struct amdgpu_uvd_clock_voltage_dependency_entry *entries; 1453 }; 1454 1455 struct amdgpu_vce_clock_voltage_dependency_entry { 1456 u32 ecclk; 1457 u32 evclk; 1458 u16 v; 1459 }; 1460 1461 struct amdgpu_vce_clock_voltage_dependency_table { 1462 u8 count; 1463 struct amdgpu_vce_clock_voltage_dependency_entry *entries; 1464 }; 1465 1466 struct amdgpu_ppm_table { 1467 u8 ppm_design; 1468 u16 cpu_core_number; 1469 u32 platform_tdp; 1470 u32 small_ac_platform_tdp; 1471 u32 platform_tdc; 1472 u32 small_ac_platform_tdc; 1473 u32 apu_tdp; 1474 u32 dgpu_tdp; 1475 u32 dgpu_ulv_power; 1476 u32 tj_max; 1477 }; 1478 1479 struct amdgpu_cac_tdp_table { 1480 u16 tdp; 1481 u16 configurable_tdp; 1482 u16 tdc; 1483 u16 battery_power_limit; 1484 u16 small_power_limit; 1485 u16 low_cac_leakage; 1486 u16 high_cac_leakage; 1487 u16 maximum_power_delivery_limit; 1488 }; 1489 1490 struct amdgpu_dpm_dynamic_state { 1491 struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk; 1492 struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk; 1493 struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk; 1494 struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk; 1495 struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk; 1496 struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table; 1497 struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table; 1498 struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table; 1499 struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table; 1500 struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk; 1501 struct amdgpu_clock_array valid_sclk_values; 1502 struct amdgpu_clock_array valid_mclk_values; 1503 struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc; 1504 struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac; 1505 u32 mclk_sclk_ratio; 1506 u32 sclk_mclk_delta; 1507 u16 vddc_vddci_delta; 1508 u16 min_vddc_for_pcie_gen2; 1509 struct amdgpu_cac_leakage_table cac_leakage_table; 1510 struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table; 1511 struct amdgpu_ppm_table *ppm_table; 1512 struct amdgpu_cac_tdp_table *cac_tdp_table; 1513 }; 1514 1515 struct amdgpu_dpm_fan { 1516 u16 t_min; 1517 u16 t_med; 1518 u16 t_high; 1519 u16 pwm_min; 1520 u16 pwm_med; 1521 u16 pwm_high; 1522 u8 t_hyst; 1523 u32 cycle_delay; 1524 u16 t_max; 1525 u8 control_mode; 1526 u16 default_max_fan_pwm; 1527 u16 default_fan_output_sensitivity; 1528 u16 fan_output_sensitivity; 1529 bool ucode_fan_control; 1530 }; 1531 1532 enum amdgpu_pcie_gen { 1533 AMDGPU_PCIE_GEN1 = 0, 1534 AMDGPU_PCIE_GEN2 = 1, 1535 AMDGPU_PCIE_GEN3 = 2, 1536 AMDGPU_PCIE_GEN_INVALID = 0xffff 1537 }; 1538 1539 enum amdgpu_dpm_forced_level { 1540 AMDGPU_DPM_FORCED_LEVEL_AUTO = 0, 1541 AMDGPU_DPM_FORCED_LEVEL_LOW = 1, 1542 AMDGPU_DPM_FORCED_LEVEL_HIGH = 2, 1543 AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3, 1544 }; 1545 1546 struct amdgpu_vce_state { 1547 /* vce clocks */ 1548 u32 evclk; 1549 u32 ecclk; 1550 /* gpu clocks */ 1551 u32 sclk; 1552 u32 mclk; 1553 u8 clk_idx; 1554 u8 pstate; 1555 }; 1556 1557 struct amdgpu_dpm_funcs { 1558 int (*get_temperature)(struct amdgpu_device *adev); 1559 int (*pre_set_power_state)(struct amdgpu_device *adev); 1560 int (*set_power_state)(struct amdgpu_device *adev); 1561 void (*post_set_power_state)(struct amdgpu_device 
*adev); 1562 void (*display_configuration_changed)(struct amdgpu_device *adev); 1563 u32 (*get_sclk)(struct amdgpu_device *adev, bool low); 1564 u32 (*get_mclk)(struct amdgpu_device *adev, bool low); 1565 void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps); 1566 void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m); 1567 int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level); 1568 bool (*vblank_too_short)(struct amdgpu_device *adev); 1569 void (*powergate_uvd)(struct amdgpu_device *adev, bool gate); 1570 void (*powergate_vce)(struct amdgpu_device *adev, bool gate); 1571 void (*enable_bapm)(struct amdgpu_device *adev, bool enable); 1572 void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode); 1573 u32 (*get_fan_control_mode)(struct amdgpu_device *adev); 1574 int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed); 1575 int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed); 1576 int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask); 1577 int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf); 1578 int (*get_sclk_od)(struct amdgpu_device *adev); 1579 int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value); 1580 int (*get_mclk_od)(struct amdgpu_device *adev); 1581 int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value); 1582 }; 1583 1584 struct amdgpu_dpm { 1585 struct amdgpu_ps *ps; 1586 /* number of valid power states */ 1587 int num_ps; 1588 /* current power state that is active */ 1589 struct amdgpu_ps *current_ps; 1590 /* requested power state */ 1591 struct amdgpu_ps *requested_ps; 1592 /* boot up power state */ 1593 struct amdgpu_ps *boot_ps; 1594 /* default uvd power state */ 1595 struct amdgpu_ps *uvd_ps; 1596 /* vce requirements */ 1597 struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS]; 1598 enum amdgpu_vce_level vce_level; 1599 enum amd_pm_state_type state; 1600 enum amd_pm_state_type user_state; 1601 u32 platform_caps; 1602 u32 voltage_response_time; 1603 u32 backbias_response_time; 1604 void *priv; 1605 u32 new_active_crtcs; 1606 int new_active_crtc_count; 1607 u32 current_active_crtcs; 1608 int current_active_crtc_count; 1609 struct amdgpu_dpm_dynamic_state dyn_state; 1610 struct amdgpu_dpm_fan fan; 1611 u32 tdp_limit; 1612 u32 near_tdp_limit; 1613 u32 near_tdp_limit_adjusted; 1614 u32 sq_ramping_threshold; 1615 u32 cac_leakage; 1616 u16 tdp_od_limit; 1617 u32 tdp_adjustment; 1618 u16 load_line_slope; 1619 bool power_control; 1620 bool ac_power; 1621 /* special states active */ 1622 bool thermal_active; 1623 bool uvd_active; 1624 bool vce_active; 1625 /* thermal handling */ 1626 struct amdgpu_dpm_thermal thermal; 1627 /* forced levels */ 1628 enum amdgpu_dpm_forced_level forced_level; 1629 }; 1630 1631 struct amdgpu_pm { 1632 struct mutex mutex; 1633 u32 current_sclk; 1634 u32 current_mclk; 1635 u32 default_sclk; 1636 u32 default_mclk; 1637 struct amdgpu_i2c_chan *i2c_bus; 1638 /* internal thermal controller on rv6xx+ */ 1639 enum amdgpu_int_thermal_type int_thermal_type; 1640 struct device *int_hwmon_dev; 1641 /* fan control parameters */ 1642 bool no_fan; 1643 u8 fan_pulses_per_revolution; 1644 u8 fan_min_rpm; 1645 u8 fan_max_rpm; 1646 /* dpm */ 1647 bool dpm_enabled; 1648 bool sysfs_initialized; 1649 struct amdgpu_dpm dpm; 1650 const struct firmware *fw; /* SMC firmware */ 1651 uint32_t fw_version; 1652 const struct amdgpu_dpm_funcs *funcs; 
1653 uint32_t pcie_gen_mask; 1654 uint32_t pcie_mlw_mask; 1655 struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */ 1656 }; 1657 1658 void amdgpu_get_pcie_info(struct amdgpu_device *adev); 1659 1660 /* 1661 * UVD 1662 */ 1663 #define AMDGPU_DEFAULT_UVD_HANDLES 10 1664 #define AMDGPU_MAX_UVD_HANDLES 40 1665 #define AMDGPU_UVD_STACK_SIZE (200*1024) 1666 #define AMDGPU_UVD_HEAP_SIZE (256*1024) 1667 #define AMDGPU_UVD_SESSION_SIZE (50*1024) 1668 #define AMDGPU_UVD_FIRMWARE_OFFSET 256 1669 1670 struct amdgpu_uvd { 1671 struct amdgpu_bo *vcpu_bo; 1672 void *cpu_addr; 1673 uint64_t gpu_addr; 1674 unsigned fw_version; 1675 void *saved_bo; 1676 unsigned max_handles; 1677 atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; 1678 struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; 1679 struct delayed_work idle_work; 1680 const struct firmware *fw; /* UVD firmware */ 1681 struct amdgpu_ring ring; 1682 struct amdgpu_irq_src irq; 1683 bool address_64_bit; 1684 bool use_ctx_buf; 1685 struct amd_sched_entity entity; 1686 }; 1687 1688 /* 1689 * VCE 1690 */ 1691 #define AMDGPU_MAX_VCE_HANDLES 16 1692 #define AMDGPU_VCE_FIRMWARE_OFFSET 256 1693 1694 #define AMDGPU_VCE_HARVEST_VCE0 (1 << 0) 1695 #define AMDGPU_VCE_HARVEST_VCE1 (1 << 1) 1696 1697 struct amdgpu_vce { 1698 struct amdgpu_bo *vcpu_bo; 1699 uint64_t gpu_addr; 1700 unsigned fw_version; 1701 unsigned fb_version; 1702 atomic_t handles[AMDGPU_MAX_VCE_HANDLES]; 1703 struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES]; 1704 uint32_t img_size[AMDGPU_MAX_VCE_HANDLES]; 1705 struct delayed_work idle_work; 1706 struct mutex idle_mutex; 1707 const struct firmware *fw; /* VCE firmware */ 1708 struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; 1709 struct amdgpu_irq_src irq; 1710 unsigned harvest_config; 1711 struct amd_sched_entity entity; 1712 }; 1713 1714 /* 1715 * SDMA 1716 */ 1717 struct amdgpu_sdma_instance { 1718 /* SDMA firmware */ 1719 const struct firmware *fw; 1720 uint32_t fw_version; 1721 uint32_t feature_version; 1722 1723 struct amdgpu_ring ring; 1724 bool burst_nop; 1725 }; 1726 1727 struct amdgpu_sdma { 1728 struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES]; 1729 struct amdgpu_irq_src trap_irq; 1730 struct amdgpu_irq_src illegal_inst_irq; 1731 int num_instances; 1732 }; 1733 1734 /* 1735 * Firmware 1736 */ 1737 struct amdgpu_firmware { 1738 struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM]; 1739 bool smu_load; 1740 struct amdgpu_bo *fw_buf; 1741 unsigned int fw_size; 1742 }; 1743 1744 /* 1745 * Benchmarking 1746 */ 1747 void amdgpu_benchmark(struct amdgpu_device *adev, int test_number); 1748 1749 1750 /* 1751 * Testing 1752 */ 1753 void amdgpu_test_moves(struct amdgpu_device *adev); 1754 void amdgpu_test_ring_sync(struct amdgpu_device *adev, 1755 struct amdgpu_ring *cpA, 1756 struct amdgpu_ring *cpB); 1757 void amdgpu_test_syncing(struct amdgpu_device *adev); 1758 1759 /* 1760 * MMU Notifier 1761 */ 1762 #if defined(CONFIG_MMU_NOTIFIER) 1763 int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr); 1764 void amdgpu_mn_unregister(struct amdgpu_bo *bo); 1765 #else 1766 static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) 1767 { 1768 return -ENODEV; 1769 } 1770 static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {} 1771 #endif 1772 1773 /* 1774 * Debugfs 1775 */ 1776 struct amdgpu_debugfs { 1777 const struct drm_info_list *files; 1778 unsigned num_files; 1779 }; 1780 1781 int amdgpu_debugfs_add_files(struct amdgpu_device *adev, 1782 const struct drm_info_list *files, 1783 unsigned nfiles); 
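/*
 * Usage sketch (illustrative only, not a definition from this file): a
 * component that wants its own debugfs entries typically declares a
 * drm_info_list array and registers it through amdgpu_debugfs_add_files()
 * above. The list and show callback names below are hypothetical:
 *
 *	static const struct drm_info_list example_debugfs_list[] = {
 *		{ "example_info", &example_debugfs_show, 0, NULL },
 *	};
 *
 *	r = amdgpu_debugfs_add_files(adev, example_debugfs_list,
 *				     ARRAY_SIZE(example_debugfs_list));
 */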
1784 int amdgpu_debugfs_fence_init(struct amdgpu_device *adev); 1785 1786 #if defined(CONFIG_DEBUG_FS) 1787 int amdgpu_debugfs_init(struct drm_minor *minor); 1788 void amdgpu_debugfs_cleanup(struct drm_minor *minor); 1789 #endif 1790 1791 int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev); 1792 1793 /* 1794 * amdgpu smumgr functions 1795 */ 1796 struct amdgpu_smumgr_funcs { 1797 int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype); 1798 int (*request_smu_load_fw)(struct amdgpu_device *adev); 1799 int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype); 1800 }; 1801 1802 /* 1803 * amdgpu smumgr 1804 */ 1805 struct amdgpu_smumgr { 1806 struct amdgpu_bo *toc_buf; 1807 struct amdgpu_bo *smu_buf; 1808 /* asic priv smu data */ 1809 void *priv; 1810 spinlock_t smu_lock; 1811 /* smumgr functions */ 1812 const struct amdgpu_smumgr_funcs *smumgr_funcs; 1813 /* ucode loading complete flag */ 1814 uint32_t fw_flags; 1815 }; 1816 1817 /* 1818 * ASIC specific register table accessible by UMD 1819 */ 1820 struct amdgpu_allowed_register_entry { 1821 uint32_t reg_offset; 1822 bool untouched; 1823 bool grbm_indexed; 1824 }; 1825 1826 /* 1827 * ASIC specific functions. 1828 */ 1829 struct amdgpu_asic_funcs { 1830 bool (*read_disabled_bios)(struct amdgpu_device *adev); 1831 bool (*read_bios_from_rom)(struct amdgpu_device *adev, 1832 u8 *bios, u32 length_bytes); 1833 int (*read_register)(struct amdgpu_device *adev, u32 se_num, 1834 u32 sh_num, u32 reg_offset, u32 *value); 1835 void (*set_vga_state)(struct amdgpu_device *adev, bool state); 1836 int (*reset)(struct amdgpu_device *adev); 1837 /* get the reference clock */ 1838 u32 (*get_xclk)(struct amdgpu_device *adev); 1839 /* MM block clocks */ 1840 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); 1841 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); 1842 /* query virtual capabilities */ 1843 u32 (*get_virtual_caps)(struct amdgpu_device *adev); 1844 }; 1845 1846 /* 1847 * IOCTL. 
1848 */ 1849 int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, 1850 struct drm_file *filp); 1851 int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, 1852 struct drm_file *filp); 1853 1854 int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data, 1855 struct drm_file *filp); 1856 int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, 1857 struct drm_file *filp); 1858 int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data, 1859 struct drm_file *filp); 1860 int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, 1861 struct drm_file *filp); 1862 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, 1863 struct drm_file *filp); 1864 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, 1865 struct drm_file *filp); 1866 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 1867 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 1868 1869 int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, 1870 struct drm_file *filp); 1871 1872 /* VRAM scratch page for HDP bug, default vram page */ 1873 struct amdgpu_vram_scratch { 1874 struct amdgpu_bo *robj; 1875 volatile uint32_t *ptr; 1876 u64 gpu_addr; 1877 }; 1878 1879 /* 1880 * ACPI 1881 */ 1882 struct amdgpu_atif_notification_cfg { 1883 bool enabled; 1884 int command_code; 1885 }; 1886 1887 struct amdgpu_atif_notifications { 1888 bool display_switch; 1889 bool expansion_mode_change; 1890 bool thermal_state; 1891 bool forced_power_state; 1892 bool system_power_state; 1893 bool display_conf_change; 1894 bool px_gfx_switch; 1895 bool brightness_change; 1896 bool dgpu_display_event; 1897 }; 1898 1899 struct amdgpu_atif_functions { 1900 bool system_params; 1901 bool sbios_requests; 1902 bool select_active_disp; 1903 bool lid_state; 1904 bool get_tv_standard; 1905 bool set_tv_standard; 1906 bool get_panel_expansion_mode; 1907 bool set_panel_expansion_mode; 1908 bool temperature_change; 1909 bool graphics_device_types; 1910 }; 1911 1912 struct amdgpu_atif { 1913 struct amdgpu_atif_notifications notifications; 1914 struct amdgpu_atif_functions functions; 1915 struct amdgpu_atif_notification_cfg notification_cfg; 1916 struct amdgpu_encoder *encoder_for_bl; 1917 }; 1918 1919 struct amdgpu_atcs_functions { 1920 bool get_ext_state; 1921 bool pcie_perf_req; 1922 bool pcie_dev_rdy; 1923 bool pcie_bus_width; 1924 }; 1925 1926 struct amdgpu_atcs { 1927 struct amdgpu_atcs_functions functions; 1928 }; 1929 1930 /* 1931 * CGS 1932 */ 1933 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev); 1934 void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device); 1935 1936 1937 /* GPU virtualization */ 1938 #define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0) 1939 #define AMDGPU_VIRT_CAPS_IS_VF (1 << 1) 1940 struct amdgpu_virtualization { 1941 bool supports_sr_iov; 1942 bool is_virtual; 1943 u32 caps; 1944 }; 1945 1946 /* 1947 * Core structure, functions and helpers. 
/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);

typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);

struct amdgpu_ip_block_status {
        bool valid;
        bool sw;
        bool hw;
};

struct amdgpu_device {
        struct device *dev;
        struct drm_device *ddev;
        struct pci_dev *pdev;

#ifdef CONFIG_DRM_AMD_ACP
        struct amdgpu_acp acp;
#endif

        /* ASIC */
        enum amd_asic_type asic_type;
        uint32_t family;
        uint32_t rev_id;
        uint32_t external_rev_id;
        unsigned long flags;
        int usec_timeout;
        const struct amdgpu_asic_funcs *asic_funcs;
        bool shutdown;
        bool need_dma32;
        bool accel_working;
        struct work_struct reset_work;
        struct notifier_block acpi_nb;
        struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
        struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
        unsigned debugfs_count;
#if defined(CONFIG_DEBUG_FS)
        struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
#endif
        struct amdgpu_atif atif;
        struct amdgpu_atcs atcs;
        struct mutex srbm_mutex;
        /* GRBM index mutex. Protects concurrent access to GRBM index */
        struct mutex grbm_idx_mutex;
        struct dev_pm_domain vga_pm_domain;
        bool have_disp_power_ref;

        /* BIOS */
        uint8_t *bios;
        bool is_atom_bios;
        struct amdgpu_bo *stollen_vga_memory;
        uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

        /* Register/doorbell mmio */
        resource_size_t rmmio_base;
        resource_size_t rmmio_size;
        void __iomem *rmmio;
        /* protects concurrent MM_INDEX/DATA based register access */
        spinlock_t mmio_idx_lock;
        /* protects concurrent SMC based register access */
        spinlock_t smc_idx_lock;
        amdgpu_rreg_t smc_rreg;
        amdgpu_wreg_t smc_wreg;
        /* protects concurrent PCIE register access */
        spinlock_t pcie_idx_lock;
        amdgpu_rreg_t pcie_rreg;
        amdgpu_wreg_t pcie_wreg;
        /* protects concurrent UVD register access */
        spinlock_t uvd_ctx_idx_lock;
        amdgpu_rreg_t uvd_ctx_rreg;
        amdgpu_wreg_t uvd_ctx_wreg;
        /* protects concurrent DIDT register access */
        spinlock_t didt_idx_lock;
        amdgpu_rreg_t didt_rreg;
        amdgpu_wreg_t didt_wreg;
        /* protects concurrent gc_cac register access */
        spinlock_t gc_cac_idx_lock;
        amdgpu_rreg_t gc_cac_rreg;
        amdgpu_wreg_t gc_cac_wreg;
        /* protects concurrent ENDPOINT (audio) register access */
        spinlock_t audio_endpt_idx_lock;
        amdgpu_block_rreg_t audio_endpt_rreg;
        amdgpu_block_wreg_t audio_endpt_wreg;
        void __iomem *rio_mem;
        resource_size_t rio_mem_size;
        struct amdgpu_doorbell doorbell;

        /* clock/pll info */
        struct amdgpu_clock clock;

        /* MC */
        struct amdgpu_mc mc;
        struct amdgpu_gart gart;
        struct amdgpu_dummy_page dummy_page;
        struct amdgpu_vm_manager vm_manager;

        /* memory management */
        struct amdgpu_mman mman;
        struct amdgpu_vram_scratch vram_scratch;
        struct amdgpu_wb wb;
        atomic64_t vram_usage;
        atomic64_t vram_vis_usage;
        atomic64_t gtt_usage;
        atomic64_t num_bytes_moved;
        atomic64_t num_evictions;
        atomic_t gpu_reset_counter;

        /* display */
        struct amdgpu_mode_info mode_info;
        struct work_struct hotplug_work;
        struct amdgpu_irq_src crtc_irq;
        struct amdgpu_irq_src pageflip_irq;
        struct amdgpu_irq_src hpd_irq;

        /* rings */
        u64 fence_context;
        unsigned num_rings;
        struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
        bool ib_pool_ready;
        struct amdgpu_sa_manager ring_tmp_bo;

        /* interrupts */
        struct amdgpu_irq irq;

        /* powerplay */
        struct amd_powerplay powerplay;
        bool pp_enabled;
        bool pp_force_state_enabled;

        /* dpm */
        struct amdgpu_pm pm;
        u32 cg_flags;
        u32 pg_flags;

        /* amdgpu smumgr */
        struct amdgpu_smumgr smu;

        /* gfx */
        struct amdgpu_gfx gfx;

        /* sdma */
        struct amdgpu_sdma sdma;

        /* uvd */
        struct amdgpu_uvd uvd;

        /* vce */
        struct amdgpu_vce vce;

        /* firmwares */
        struct amdgpu_firmware firmware;

        /* GDS */
        struct amdgpu_gds gds;

        const struct amdgpu_ip_block_version *ip_blocks;
        int num_ip_blocks;
        struct amdgpu_ip_block_status *ip_block_status;
        struct mutex mn_lock;
        DECLARE_HASHTABLE(mn_hash, 7);

        /* tracking pinned memory */
        u64 vram_pin_size;
        u64 invisible_pin_size;
        u64 gart_pin_size;

        /* amdkfd interface */
        struct kfd_dev *kfd;

        struct amdgpu_virtualization virtualization;
};

bool amdgpu_device_is_px(struct drm_device *dev);
int amdgpu_device_init(struct amdgpu_device *adev,
                       struct drm_device *ddev,
                       struct pci_dev *pdev,
                       uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
                        bool always_indirect);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                    bool always_indirect);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);

u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
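/*
 * Usage sketch: the always_indirect flag on amdgpu_mm_rreg()/amdgpu_mm_wreg()
 * selects access through the MM_INDEX/MM_DATA pair (serialized by
 * mmio_idx_lock) instead of a direct MMIO access; offsets outside the mapped
 * MMIO window are expected to take the indirect path in any case. The
 * RREG32()/RREG32_IDX() macros below wrap the two modes, for example:
 *
 *	u32 a = RREG32(reg);		direct MMIO when the offset allows it
 *	u32 b = RREG32_IDX(reg);	forced MM_INDEX/MM_DATA access
 *
 * The exact policy lives in the amdgpu_mm_rreg()/amdgpu_mm_wreg()
 * implementations, not here.
 */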
/*
 * Registers read & write functions.
 */
#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false)
#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), true)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), false))
#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), false)
#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), true)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
/* extract a field value using the FIELD##_MASK/FIELD##_SHIFT pair */
#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask)				\
        do {							\
                uint32_t tmp_ = RREG32(reg);			\
                tmp_ &= (mask);					\
                tmp_ |= ((val) & ~(mask));			\
                WREG32(reg, tmp_);				\
        } while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
#define WREG32_PLL_P(reg, val, mask)				\
        do {							\
                uint32_t tmp_ = RREG32_PLL(reg);		\
                tmp_ &= (mask);					\
                tmp_ |= ((val) & ~(mask));			\
                WREG32_PLL(reg, tmp_);				\
        } while (0)
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))

#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val)			\
        (((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
         (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field)				\
        (((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))

/*
 * BIOS helpers.
 */
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
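/*
 * Usage sketch: REG_SET_FIELD()/REG_GET_FIELD() are meant for
 * read-modify-write sequences against the generated *__SHIFT/*_MASK register
 * headers. With a hypothetical register FOO_CNTL that has an ENABLE field:
 *
 *	u32 tmp = RREG32(mmFOO_CNTL);
 *	tmp = REG_SET_FIELD(tmp, FOO_CNTL, ENABLE, 1);
 *	WREG32(mmFOO_CNTL, tmp);
 *
 * FOO_CNTL/ENABLE are placeholders; real names come from the per-ASIC
 * register headers.
 */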
/*
 * RING helpers.
 */
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
        if (ring->count_dw <= 0)
                DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
        ring->ring[ring->wptr++] = v;
        ring->wptr &= ring->ptr_mask;
        ring->count_dw--;
}

static inline struct amdgpu_sdma_instance *
amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++)
                if (&adev->sdma.instance[i].ring == ring)
                        break;

        /* only return an instance the ring was actually found in */
        if (i < adev->sdma.num_instances)
                return &adev->sdma.instance[i];
        else
                return NULL;
}
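/*
 * Usage sketch: amdgpu_ring_write() is used between a ring allocation and a
 * commit, along the lines of the ring tests:
 *
 *	r = amdgpu_ring_alloc(ring, 3);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
 *	amdgpu_ring_write(ring, reg - PACKET3_SET_UCONFIG_REG_START);
 *	amdgpu_ring_write(ring, 0xDEADBEEF);
 *	amdgpu_ring_commit(ring);
 *
 * amdgpu_ring_alloc()/amdgpu_ring_commit() are declared elsewhere in this
 * header; the PACKET3 encoding and dword count are ASIC specific, so treat
 * this as a sketch only.
 */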
/*
 * ASICs macro.
 */
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v) ((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r, o) (r)->funcs->patch_cond_exec((r), (o))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
#define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
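/*
 * Usage sketch: the amdgpu_dpm_* wrappers below choose between the powerplay
 * component and the legacy dpm code at run time, keyed off adev->pp_enabled,
 * so callers only ever use the wrapper, for example:
 *
 *	if (amdgpu_dpm_get_temperature(adev) > limit)
 *		schedule_work(&adev->pm.dpm.thermal.work);
 *
 * The thermal work item above is only an example of a plausible caller.
 */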
#define amdgpu_dpm_get_temperature(adev) \
        ((adev)->pp_enabled ?						\
              (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
              (adev)->pm.funcs->get_temperature((adev)))

#define amdgpu_dpm_set_fan_control_mode(adev, m) \
        ((adev)->pp_enabled ?						\
              (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
              (adev)->pm.funcs->set_fan_control_mode((adev), (m)))

#define amdgpu_dpm_get_fan_control_mode(adev) \
        ((adev)->pp_enabled ?						\
              (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
              (adev)->pm.funcs->get_fan_control_mode((adev)))

#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
        ((adev)->pp_enabled ?						\
              (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
              (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))

#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
        ((adev)->pp_enabled ?						\
              (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
              (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))

#define amdgpu_dpm_get_sclk(adev, l) \
        ((adev)->pp_enabled ?						\
              (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
              (adev)->pm.funcs->get_sclk((adev), (l)))

#define amdgpu_dpm_get_mclk(adev, l) \
        ((adev)->pp_enabled ?						\
              (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
              (adev)->pm.funcs->get_mclk((adev), (l)))

#define amdgpu_dpm_force_performance_level(adev, l) \
        ((adev)->pp_enabled ?						\
              (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
              (adev)->pm.funcs->force_performance_level((adev), (l)))

#define amdgpu_dpm_powergate_uvd(adev, g) \
        ((adev)->pp_enabled ?						\
              (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
              (adev)->pm.funcs->powergate_uvd((adev), (g)))

#define amdgpu_dpm_powergate_vce(adev, g) \
        ((adev)->pp_enabled ?						\
              (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
              (adev)->pm.funcs->powergate_vce((adev), (g)))
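/*
 * Usage sketch: unlike the wrappers above, the macros from here on only have
 * a powerplay path, so they are expected to be used when adev->pp_enabled is
 * known to be true or behind an explicit check, for example:
 *
 *	if (adev->pp_enabled)
 *		amdgpu_dpm_get_pp_num_states(adev, &data);
 *
 * This reflects how the macros are written rather than a documented
 * requirement.
 */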
#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
        ((adev)->pp_enabled ?						\
              (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
              (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)))

#define amdgpu_dpm_get_current_power_state(adev) \
        (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)

#define amdgpu_dpm_get_performance_level(adev) \
        (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)

#define amdgpu_dpm_get_pp_num_states(adev, data) \
        (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)

#define amdgpu_dpm_get_pp_table(adev, table) \
        (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)

#define amdgpu_dpm_set_pp_table(adev, buf, size) \
        (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)

#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
        (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)

#define amdgpu_dpm_force_clock_level(adev, type, level) \
        (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)

#define amdgpu_dpm_get_sclk_od(adev) \
        (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)

#define amdgpu_dpm_set_sclk_od(adev, value) \
        (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)

#define amdgpu_dpm_get_mclk_od(adev) \
        ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))

#define amdgpu_dpm_set_mclk_od(adev, value) \
        ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))

#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
        (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))

#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))

/* Common functions */
int amdgpu_gpu_reset(struct amdgpu_device *adev);
void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_card_posted(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);

int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
                       u32 ip_instance, u32 ring,
                       struct amdgpu_ring **out_ring);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
                              uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
                                  unsigned long end);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
                                       int *last_invalidated);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
                                 struct ttm_mem_reg *mem);
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
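/*
 * Usage sketch: amdgpu_vram_location() and amdgpu_gtt_location() are called
 * from the GMC code while laying out the GPU address space, VRAM first and
 * GTT after it, for example:
 *
 *	amdgpu_vram_location(adev, &adev->mc, 0);
 *	amdgpu_gtt_location(adev, &adev->mc);
 *
 * The base address of 0 is only an example; the real value comes from the
 * ASIC's memory controller setup.
 */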
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
                                      const u32 *registers,
                                      const u32 array_size);

bool amdgpu_device_is_px(struct drm_device *dev);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
#endif

/*
 * KMS
 */
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;

int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
int amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
                                 struct drm_file *file_priv);
void amdgpu_driver_preclose_kms(struct drm_device *dev,
                                struct drm_file *file_priv);
int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
                                    int *max_error,
                                    struct timeval *vblank_time,
                                    unsigned flags);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
                             unsigned long arg);

/*
 * functions used by amdgpu_encoder.c
 */
struct amdgpu_afmt_acr {
        u32 clock;

        int n_32khz;
        int cts_32khz;

        int n_44_1khz;
        int cts_44_1khz;

        int n_48khz;
        int cts_48khz;
};

struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);

/* amdgpu_acpi.c */
#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
                                         u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif

struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                       uint64_t addr, struct amdgpu_bo **bo);

#include "amdgpu_object.h"
#endif