/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
#include <linux/fence.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_gds.h"
#include "amd_powerplay.h"
#include "amdgpu_acp.h"

#include "gpu_scheduler.h"
#include "amdgpu_virt.h"

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_moverate;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_smc_load_fw;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern unsigned amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_powerplay;
extern int amdgpu_powercontainment;
extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;
extern unsigned amdgpu_cg_mask;
extern unsigned amdgpu_pg_mask;
extern char *amdgpu_disable_cu;
extern int amdgpu_sclk_deep_sleep_en;
extern char *amdgpu_virtual_display;
extern unsigned amdgpu_pp_feature_mask;

#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	3000
#define AMDGPU_MAX_USEC_TIMEOUT		100000	/* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT	(HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE		16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS	32
#define AMDGPUFB_CONN_LIMIT		4
#define AMDGPU_BIOS_NUM_SCRATCH		8

/* max number of rings */
#define AMDGPU_MAX_RINGS		16
#define AMDGPU_MAX_GFX_RINGS		1
#define AMDGPU_MAX_COMPUTE_RINGS	8
#define AMDGPU_MAX_VCE_RINGS		3

/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES	2

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE		(8 << 20)

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA		0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX		(1 << 0)
#define AMDGPU_RESET_COMPUTE		(1 << 1)
#define AMDGPU_RESET_DMA		(1 << 2)
#define AMDGPU_RESET_CP			(1 << 3)
#define AMDGPU_RESET_GRBM		(1 << 4)
#define AMDGPU_RESET_DMA1		(1 << 5)
#define AMDGPU_RESET_RLC		(1 << 6)
#define AMDGPU_RESET_SEM		(1 << 7)
#define AMDGPU_RESET_IH			(1 << 8)
#define AMDGPU_RESET_VMC		(1 << 9)
#define AMDGPU_RESET_MC			(1 << 10)
#define AMDGPU_RESET_DISPLAY		(1 << 11)
#define AMDGPU_RESET_UVD		(1 << 12)
#define AMDGPU_RESET_VCE		(1 << 13)
#define AMDGPU_RESET_VCE1		(1 << 14)

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE		0x00000000L
#define AMDGPU_GFX_SAFE_MODE		0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE	0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE	0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE	0x00000008L

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH		128
#define CIK_CURSOR_HEIGHT		128

struct amdgpu_device;
struct amdgpu_ib;
struct amdgpu_vm;
struct amdgpu_ring;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_EOP = 0,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_TRAP0 = 0,
	AMDGPU_SDMA_IRQ_TRAP1,

	AMDGPU_SDMA_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state);
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state);
int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type);
bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type);

struct amdgpu_ip_block_version {
	enum amd_ip_block_type type;
	u32 major;
	u32 minor;
	u32 rev;
	const struct amd_ip_funcs *funcs;
};

int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor);

const struct amdgpu_ip_block_version *amdgpu_get_ip_block(
					struct amdgpu_device *adev,
					enum amd_ip_block_type type);

/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t copy_max_bytes;

	/* number of dw to reserve per operation */
	unsigned copy_num_dw;

	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to transfer */
				 uint32_t byte_count);

	/* maximum bytes in a single operation */
	uint32_t fill_max_bytes;

	/* number of dw to reserve per operation */
	unsigned fill_num_dw;

	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to fill */
				 uint32_t byte_count);
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);
	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint32_t flags);
};

/* provided by the gmc block */
struct amdgpu_gart_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
			      uint32_t vmid);
	/* write pte/pde updates using the cpu */
	int (*set_pte_pde)(struct amdgpu_device *adev,
			   void *cpu_pt_addr, /* cpu addr of page table */
			   uint32_t gpu_page_idx, /* pte/pde to update */
			   uint64_t addr, /* addr to write into pte/pde */
			   uint32_t flags); /* access flags */
};

/* provided by the ih block */
struct amdgpu_ih_funcs {
	/* ring read/write ptr handling, called from interrupt context */
	u32 (*get_wptr)(struct amdgpu_device *adev);
	void (*decode_iv)(struct amdgpu_device *adev,
			  struct amdgpu_iv_entry *entry);
	void (*set_rptr)(struct amdgpu_device *adev);
};

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	/* ring read/write ptr handling */
	u32 (*get_rptr)(struct amdgpu_ring *ring);
	u32 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_ib *ib,
			unsigned vm_id, bool ctx_switch);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
	/* note usage for clock and power gating */
	void (*begin_use)(struct amdgpu_ring *ring);
	void (*end_use)(struct amdgpu_ring *ring);
	void (*emit_switch_buffer)(struct amdgpu_ring *ring);
	void (*emit_cntxcntl)(struct amdgpu_ring *ring, uint32_t flags);
	unsigned (*get_emit_ib_size)(struct amdgpu_ring *ring);
	unsigned (*get_dma_frame_size)(struct amdgpu_ring *ring);
};

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Dummy page
 */
struct amdgpu_dummy_page {
	struct page *page;
	dma_addr_t addr;
};
int amdgpu_dummy_page_init(struct amdgpu_device *adev);
void amdgpu_dummy_page_fini(struct amdgpu_device *adev);


/*
 * Clocks
 */

#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 KHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};
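
/*
 * Illustrative note only (not part of the driver): the clock fields above
 * are stored in 10 KHz units, so converting to MHz is a divide by 100.
 * The helper name below is hypothetical.
 *
 *	static inline u32 example_clk_10khz_to_mhz(u32 clk_10khz)
 *	{
 *		return clk_10khz / 100;	// e.g. default_sclk = 80000 -> 800 MHz
 *	}
 */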

/*
 * Fences.
 */
struct amdgpu_fence_driver {
	uint64_t gpu_addr;
	volatile uint32_t *cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint32_t sync_seq;
	atomic_t last_seq;
	bool initialized;
	struct amdgpu_irq_src *irq_src;
	unsigned irq_type;
	struct timer_list fallback_timer;
	unsigned num_fences_mask;
	spinlock_t lock;
	struct fence **fences;
};

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED	((void *)0ul)
#define AMDGPU_FENCE_OWNER_VM		((void *)1ul)

#define AMDGPU_FENCE_FLAG_64BIT		(1 << 0)
#define AMDGPU_FENCE_FLAG_INT		(1 << 1)

int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);

int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);

/*
 * BO.
 */
struct amdgpu_bo_list_entry {
	struct amdgpu_bo *robj;
	struct ttm_validate_buffer tv;
	struct amdgpu_bo_va *bo_va;
	uint32_t priority;
	struct page **user_pages;
	int user_invalidated;
};

struct amdgpu_bo_va_mapping {
	struct list_head list;
	struct interval_tree_node it;
	uint64_t offset;
	uint32_t flags;
};

/* bo virtual addresses in a specific vm */
struct amdgpu_bo_va {
	/* protected by bo being reserved */
	struct list_head bo_list;
	struct fence *last_pt_update;
	unsigned ref_count;

	/* protected by vm mutex and spinlock */
	struct list_head vm_status;

	/* mappings for this bo_va */
	struct list_head invalids;
	struct list_head valids;

	/* constant after initialization */
	struct amdgpu_vm *vm;
	struct amdgpu_bo *bo;
};

#define AMDGPU_GEM_DOMAIN_MAX	0x3

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32 prefered_domains;
	u32 allowed_domains;
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	struct ttm_placement placement;
	struct ttm_buffer_object tbo;
	struct ttm_bo_kmap_obj kmap;
	u64 flags;
	unsigned pin_count;
	void *kptr;
	u64 tiling_flags;
	u64 metadata_flags;
	void *metadata;
	u32 metadata_size;
	/* list of all virtual addresses to which this bo
	 * is associated
	 */
	struct list_head va;
	/* Constant after initialization */
	struct amdgpu_device *adev;
	struct drm_gem_object gem_base;
	struct amdgpu_bo *parent;
	struct amdgpu_bo *shadow;

	struct ttm_bo_kmap_obj dma_buf_vmap;
	struct amdgpu_mn *mn;
	struct list_head mn_list;
	struct list_head shadow_list;
};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)

void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags);
int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);

/* sub-allocation manager, it has to be protected by another lock.
 * By design this is a helper for other parts of the driver
 * like the indirect buffer or semaphores, which both have their own
 * locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (the first entry has offset == 0, the last entry has the highest
 * offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size.  If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub-object until we reach object_offset + object_size >=
 * alloc_size; that object then becomes the sub-object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */

#define AMDGPU_SA_NUM_FENCE_LISTS	32

struct amdgpu_sa_manager {
	wait_queue_head_t wq;
	struct amdgpu_bo *bo;
	struct list_head *hole;
	struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS];
	struct list_head olist;
	unsigned size;
	uint64_t gpu_addr;
	void *cpu_ptr;
	uint32_t domain;
	uint32_t align;
};

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head olist;
	struct list_head flist;
	struct amdgpu_sa_manager *manager;
	unsigned soffset;
	unsigned eoffset;
	struct fence *fence;
};
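
/*
 * Illustrative sketch only (not a driver interface): the "room at the end"
 * test described in the comment above, written out against the
 * amdgpu_sa_manager/amdgpu_sa_bo fields declared here.  The helper name is
 * hypothetical.
 *
 *	static bool example_sa_fits_at_end(struct amdgpu_sa_manager *sa_manager,
 *					   unsigned alloc_size)
 *	{
 *		struct amdgpu_sa_bo *last;
 *
 *		if (list_empty(&sa_manager->olist))
 *			return sa_manager->size >= alloc_size;
 *
 *		// olist is kept in offset order, so the last entry has the
 *		// highest end offset (eoffset = offset + size)
 *		last = list_last_entry(&sa_manager->olist,
 *				       struct amdgpu_sa_bo, olist);
 *		return sa_manager->size - last->eoffset >= alloc_size;
 *	}
 */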
/*
 * GEM objects.
 */
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj);

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);

/*
 * Synchronization
 */
struct amdgpu_sync {
	DECLARE_HASHTABLE(fences, 4);
	struct fence *last_vm_update;
};

void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f);
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner);
struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
				     struct amdgpu_ring *ring);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
void amdgpu_sync_fini(void);
int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);

/*
 * GART structures, functions & helpers
 */
struct amdgpu_mc;

#define AMDGPU_GPU_PAGE_SIZE	4096
#define AMDGPU_GPU_PAGE_MASK	(AMDGPU_GPU_PAGE_SIZE - 1)
#define AMDGPU_GPU_PAGE_SHIFT	12
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)

struct amdgpu_gart {
	dma_addr_t table_addr;
	struct amdgpu_bo *robj;
	void *ptr;
	unsigned num_gpu_pages;
	unsigned num_cpu_pages;
	unsigned table_size;
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	struct page **pages;
#endif
	bool ready;
	const struct amdgpu_gart_funcs *gart_funcs;
};

int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
			int pages);
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr, uint32_t flags);
int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);

/*
 * GPU MC structures, functions & helpers
 */
struct amdgpu_mc {
	resource_size_t aper_size;
	resource_size_t aper_base;
	resource_size_t agp_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64 mc_vram_size;
	u64 visible_vram_size;
	u64 gtt_size;
	u64 gtt_start;
	u64 gtt_end;
	u64 vram_start;
	u64 vram_end;
	unsigned vram_width;
	u64 real_vram_size;
	int vram_mtrr;
	u64 gtt_base_align;
	u64 mc_mask;
	const struct firmware *fw;	/* MC firmware */
	uint32_t fw_version;
	struct amdgpu_irq_src vm_fault;
	uint32_t vram_type;
	uint32_t srbm_soft_reset;
	struct amdgpu_mode_mc_save save;
};
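
/*
 * Illustrative sketch only: the AMDGPU_GPU_PAGE_* macros in the GART section
 * above round byte sizes up to 4 KiB GPU pages, which is the unit the GART
 * bind/unbind helpers count in.  The helper name below is hypothetical.
 *
 *	static unsigned example_bytes_to_gpu_pages(uint64_t size)
 *	{
 *		// e.g. size = 4097 -> aligned to 8192 -> 2 GPU pages
 *		return AMDGPU_GPU_PAGE_ALIGN(size) >> AMDGPU_GPU_PAGE_SHIFT;
 *	}
 */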
/*
 * GPU doorbell structures, functions & helpers
 */
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
{
	AMDGPU_DOORBELL_KIQ = 0x000,
	AMDGPU_DOORBELL_HIQ = 0x001,
	AMDGPU_DOORBELL_DIQ = 0x002,
	AMDGPU_DOORBELL_MEC_RING0 = 0x010,
	AMDGPU_DOORBELL_MEC_RING1 = 0x011,
	AMDGPU_DOORBELL_MEC_RING2 = 0x012,
	AMDGPU_DOORBELL_MEC_RING3 = 0x013,
	AMDGPU_DOORBELL_MEC_RING4 = 0x014,
	AMDGPU_DOORBELL_MEC_RING5 = 0x015,
	AMDGPU_DOORBELL_MEC_RING6 = 0x016,
	AMDGPU_DOORBELL_MEC_RING7 = 0x017,
	AMDGPU_DOORBELL_GFX_RING0 = 0x020,
	AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
	AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
	AMDGPU_DOORBELL_IH = 0x1E8,
	AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
	AMDGPU_DOORBELL_INVALID = 0xFFFF
} AMDGPU_DOORBELL_ASSIGNMENT;

struct amdgpu_doorbell {
	/* doorbell mmio */
	resource_size_t base;
	resource_size_t size;
	u32 __iomem *ptr;
	u32 num_doorbells;	/* Number of doorbells actually reserved for amdgpu. */
};

void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset);

/*
 * IRQS.
 */

struct amdgpu_flip_work {
	struct delayed_work flip_work;
	struct work_struct unpin_work;
	struct amdgpu_device *adev;
	int crtc_id;
	u32 target_vblank;
	uint64_t base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo *old_abo;
	struct fence *excl;
	unsigned shared_count;
	struct fence **shared;
	struct fence_cb cb;
	bool async;
};


/*
 * CP & rings.
 */

struct amdgpu_ib {
	struct amdgpu_sa_bo *sa_bo;
	uint32_t length_dw;
	uint64_t gpu_addr;
	uint32_t *ptr;
	uint32_t flags;
};

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX,
	AMDGPU_RING_TYPE_COMPUTE,
	AMDGPU_RING_TYPE_SDMA,
	AMDGPU_RING_TYPE_UVD,
	AMDGPU_RING_TYPE_VCE
};

extern const struct amd_sched_backend_ops amdgpu_sched_ops;

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job);

void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		      struct amd_sched_entity *entity, void *owner,
		      struct fence **f);

struct amdgpu_ring {
	struct amdgpu_device *adev;
	const struct amdgpu_ring_funcs *funcs;
	struct amdgpu_fence_driver fence_drv;
	struct amd_gpu_scheduler sched;

	struct amdgpu_bo *ring_obj;
	volatile uint32_t *ring;
	unsigned rptr_offs;
	unsigned wptr;
	unsigned wptr_old;
	unsigned ring_size;
	unsigned max_dw;
	int count_dw;
	uint64_t gpu_addr;
	uint32_t align_mask;
	uint32_t ptr_mask;
	bool ready;
	u32 nop;
	u32 idx;
	u32 me;
	u32 pipe;
	u32 queue;
	struct amdgpu_bo *mqd_obj;
	u32 doorbell_index;
	bool use_doorbell;
	unsigned wptr_offs;
	unsigned fence_offs;
	uint64_t current_ctx;
	enum amdgpu_ring_type type;
	char name[16];
	unsigned cond_exe_offs;
	u64 cond_exe_gpu_addr;
	volatile u32 *cond_exe_cpu_addr;
#if defined(CONFIG_DEBUG_FS)
	struct dentry *ent;
#endif
};
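
/*
 * Illustrative sketch only: how the wptr/ptr_mask/count_dw fields above
 * interact when a single dword is emitted to a ring.  This mirrors the usual
 * pattern (the real write helper lives elsewhere in the driver); treat it as
 * an explanation, not the canonical implementation.
 *
 *	static void example_ring_write(struct amdgpu_ring *ring, uint32_t v)
 *	{
 *		// space must already be reserved with amdgpu_ring_alloc()
 *		ring->ring[ring->wptr++] = v;
 *		ring->wptr &= ring->ptr_mask;	// wrap around the ring buffer
 *		ring->count_dw--;		// one reserved dword consumed
 *	}
 */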
/*
 * VM
 */

/* maximum number of VMIDs */
#define AMDGPU_NUM_VM	16

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)

/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE	32768

/* LOG2 number of contiguous pages for the fragment field */
#define AMDGPU_LOG2_PAGES_PER_FRAG	4

#define AMDGPU_PTE_VALID	(1 << 0)
#define AMDGPU_PTE_SYSTEM	(1 << 1)
#define AMDGPU_PTE_SNOOPED	(1 << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1 << 4)

#define AMDGPU_PTE_READABLE	(1 << 5)
#define AMDGPU_PTE_WRITEABLE	(1 << 6)

#define AMDGPU_PTE_FRAG(x)	((x & 0x1f) << 7)

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

struct amdgpu_vm_pt {
	struct amdgpu_bo_list_entry entry;
	uint64_t addr;
	uint64_t shadow_addr;
};

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root va;

	/* protecting invalidated */
	spinlock_t status_lock;

	/* BOs moved, but not yet updated in the PT */
	struct list_head invalidated;

	/* BOs cleared in the PT because of a move */
	struct list_head cleared;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head freed;

	/* contains the page directory */
	struct amdgpu_bo *page_directory;
	unsigned max_pde_used;
	struct fence *page_directory_fence;
	uint64_t last_eviction_counter;

	/* array of page tables, one for each page directory entry */
	struct amdgpu_vm_pt *page_tables;

	/* for id and flush management per ring */
	struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];

	/* protecting freed */
	spinlock_t freed_lock;

	/* Scheduler entity for page table updates */
	struct amd_sched_entity entity;

	/* client id */
	u64 client_id;
};

struct amdgpu_vm_id {
	struct list_head list;
	struct fence *first;
	struct amdgpu_sync active;
	struct fence *last_flush;
	atomic64_t owner;

	uint64_t pd_gpu_addr;
	/* last flushed PD/PT update */
	struct fence *flushed_updates;

	uint32_t current_gpu_reset_count;

	uint32_t gds_base;
	uint32_t gds_size;
	uint32_t gws_base;
	uint32_t gws_size;
	uint32_t oa_base;
	uint32_t oa_size;
};

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct mutex lock;
	unsigned num_ids;
	struct list_head ids_lru;
	struct amdgpu_vm_id ids[AMDGPU_NUM_VM];

	/* Handling of VM fences */
	u64 fence_context;
	unsigned seqno[AMDGPU_MAX_RINGS];

	uint32_t max_pfn;
	/* vram base address for page table entry */
	u64 vram_base_offset;
	/* is vm enabled? */
	bool enabled;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
	struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
	unsigned vm_pte_num_rings;
	atomic_t vm_pte_next_ring;
	/* client id counter */
	atomic64_t client_counter;
};

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct list_head *duplicates);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct fence *fence,
		      struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);

/*
 * context related structures
 */

struct amdgpu_ctx_ring {
	uint64_t sequence;
	struct fence **fences;
	struct amd_sched_entity entity;
};

struct amdgpu_ctx {
	struct kref refcount;
	struct amdgpu_device *adev;
	unsigned reset_counter;
	spinlock_t ring_lock;
	struct fence **fences;
	struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
	bool preamble_presented;
};

struct amdgpu_ctx_mgr {
	struct amdgpu_device *adev;
	struct mutex lock;
	/* protected by lock */
	struct idr ctx_handles;
};

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);

uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence);
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				   struct amdgpu_ring *ring, uint64_t seq);

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp);

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);

/*
 * file private structure
 */

struct amdgpu_fpriv {
	struct amdgpu_vm vm;
	struct mutex bo_list_lock;
	struct idr bo_list_handles;
	struct amdgpu_ctx_mgr ctx_mgr;
};

/*
 * residency list
 */

struct amdgpu_bo_list {
	struct mutex lock;
	struct amdgpu_bo *gds_obj;
	struct amdgpu_bo *gws_obj;
	struct amdgpu_bo *oa_obj;
	unsigned first_userptr;
	unsigned num_entries;
	struct amdgpu_bo_list_entry *array;
};

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated);
void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
void amdgpu_bo_list_free(struct amdgpu_bo_list *list);

/*
 * GFX stuff
 */
#include "clearstate_defs.h"

struct amdgpu_rlc_funcs {
	void (*enter_safe_mode)(struct amdgpu_device *adev);
	void (*exit_safe_mode)(struct amdgpu_device *adev);
};

struct amdgpu_rlc {
	/* for power gating */
	struct amdgpu_bo *save_restore_obj;
	uint64_t save_restore_gpu_addr;
	volatile uint32_t *sr_ptr;
	const u32 *reg_list;
	u32 reg_list_size;
	/* for clear state */
	struct amdgpu_bo *clear_state_obj;
	uint64_t clear_state_gpu_addr;
	volatile uint32_t *cs_ptr;
	const struct cs_section_def *cs_data;
	u32 clear_state_size;
	/* for cp tables */
	struct amdgpu_bo *cp_table_obj;
	uint64_t cp_table_gpu_addr;
	volatile uint32_t *cp_table_ptr;
	u32 cp_table_size;

	/* safe mode for updating CG/PG state */
	bool in_safe_mode;
	const struct amdgpu_rlc_funcs *funcs;

	/* for firmware data */
	u32 save_and_restore_offset;
	u32 clear_state_descriptor_offset;
	u32 avail_scratch_ram_locations;
	u32 reg_restore_list_size;
	u32 reg_list_format_start;
	u32 reg_list_format_separate_start;
	u32 starting_offsets_start;
	u32 reg_list_format_size_bytes;
	u32 reg_list_size_bytes;

	u32 *register_list_format;
	u32 *register_restore;
};

struct amdgpu_mec {
	struct amdgpu_bo *hpd_eop_obj;
	u64 hpd_eop_gpu_addr;
	u32 num_pipe;
	u32 num_mec;
	u32 num_queue;
};

/*
 * GPU scratch registers structures, functions & helpers
 */
struct amdgpu_scratch {
	unsigned num_reg;
	uint32_t reg_base;
	bool free[32];
	uint32_t reg[32];
};

/*
 * GFX configurations
 */
struct amdgpu_gca_config {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_tile_pipes;
	unsigned backend_enable_mask;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;
	unsigned mc_arb_ramcfg;
	unsigned gb_addr_config;
	unsigned num_rbs;

	uint32_t tile_mode_array[32];
	uint32_t macrotile_mode_array[16];
};

struct amdgpu_cu_info {
	uint32_t number;	/* total active CU number */
	uint32_t ao_cu_mask;
	uint32_t bitmap[4][4];
};

struct amdgpu_gfx_funcs {
	/* get the gpu clock counter */
	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
	void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
};

struct amdgpu_gfx {
	struct mutex gpu_clock_mutex;
	struct amdgpu_gca_config config;
	struct amdgpu_rlc rlc;
	struct amdgpu_mec mec;
	struct amdgpu_scratch scratch;
	const struct firmware *me_fw;	/* ME firmware */
	uint32_t me_fw_version;
	const struct firmware *pfp_fw;	/* PFP firmware */
	uint32_t pfp_fw_version;
	const struct firmware *ce_fw;	/* CE firmware */
	uint32_t ce_fw_version;
	const struct firmware *rlc_fw;	/* RLC firmware */
	uint32_t rlc_fw_version;
	const struct firmware *mec_fw;	/* MEC firmware */
	uint32_t mec_fw_version;
	const struct firmware *mec2_fw;	/* MEC2 firmware */
	uint32_t mec2_fw_version;
	uint32_t me_feature_version;
	uint32_t ce_feature_version;
	uint32_t pfp_feature_version;
	uint32_t rlc_feature_version;
	uint32_t mec_feature_version;
	uint32_t mec2_feature_version;
	struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
	unsigned num_gfx_rings;
	struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
	unsigned num_compute_rings;
	struct amdgpu_irq_src eop_irq;
	struct amdgpu_irq_src priv_reg_irq;
	struct amdgpu_irq_src priv_inst_irq;
	/* gfx status */
	uint32_t gfx_current_status;
	/* ce ram size */
	unsigned ce_ram_size;
	struct amdgpu_cu_info cu_info;
	const struct amdgpu_gfx_funcs *funcs;

	/* reset mask */
	uint32_t grbm_soft_reset;
	uint32_t srbm_soft_reset;
};

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ib, struct fence *last_vm_update,
		       struct amdgpu_job *job, struct fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned ring_size, u32 nop, u32 align_mask,
		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
		     enum amdgpu_ring_type ring_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
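
/*
 * Illustrative sketch only: a typical direct-submission flow built from the
 * IB interfaces declared above, similar in spirit to a ring's IB test.
 * Packet contents are placeholders and error handling is abbreviated.
 *
 *	static int example_direct_submit(struct amdgpu_device *adev,
 *					 struct amdgpu_ring *ring)
 *	{
 *		struct amdgpu_ib ib;
 *		struct fence *f = NULL;
 *		int r;
 *
 *		r = amdgpu_ib_get(adev, NULL, 256, &ib);  // allocate IB memory
 *		if (r)
 *			return r;
 *		ib.ptr[0] = ring->nop;			  // placeholder packet
 *		ib.length_dw = 1;
 *		r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
 *		if (!r)
 *			fence_wait(f, false);		  // wait for completion
 *		amdgpu_ib_free(adev, &ib, f);
 *		fence_put(f);
 *		return r;
 *	}
 */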
/*
 * CS.
 */
struct amdgpu_cs_chunk {
	uint32_t chunk_id;
	uint32_t length_dw;
	void *kdata;
};

struct amdgpu_cs_parser {
	struct amdgpu_device *adev;
	struct drm_file *filp;
	struct amdgpu_ctx *ctx;

	/* chunks */
	unsigned nchunks;
	struct amdgpu_cs_chunk *chunks;

	/* scheduler job object */
	struct amdgpu_job *job;

	/* buffer objects */
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_list *bo_list;
	struct amdgpu_bo_list_entry vm_pd;
	struct list_head validated;
	struct fence *fence;
	uint64_t bytes_moved_threshold;
	uint64_t bytes_moved;
	struct amdgpu_bo_list_entry *evictable;

	/* user fence */
	struct amdgpu_bo_list_entry uf_entry;
};

#define AMDGPU_PREAMBLE_IB_PRESENT	(1 << 0) /* bit set means command submit involves a preamble IB */
#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1) /* bit set means preamble IB is first presented in its context */
#define AMDGPU_HAVE_CTX_SWITCH		(1 << 2) /* bit set means context switch occurred */

struct amdgpu_job {
	struct amd_sched_job base;
	struct amdgpu_device *adev;
	struct amdgpu_vm *vm;
	struct amdgpu_ring *ring;
	struct amdgpu_sync sync;
	struct amdgpu_ib *ibs;
	struct fence *fence;	/* the hw fence */
	uint32_t preamble_status;
	uint32_t num_ibs;
	void *owner;
	uint64_t fence_ctx;	/* the fence_context this job uses */
	bool vm_needs_flush;
	unsigned vm_id;
	uint64_t vm_pd_addr;
	uint32_t gds_base, gds_size;
	uint32_t gws_base, gws_size;
	uint32_t oa_base, oa_size;

	/* user fence handling */
	uint64_t uf_addr;
	uint64_t uf_sequence;

};
#define to_amdgpu_job(sched_job)	\
	container_of((sched_job), struct amdgpu_job, base)

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
				      uint32_t ib_idx, int idx)
{
	return p->job->ibs[ib_idx].ptr[idx];
}

static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
				       uint32_t ib_idx, int idx,
				       uint32_t value)
{
	p->job->ibs[ib_idx].ptr[idx] = value;
}

/*
 * Writeback
 */
#define AMDGPU_MAX_WB 1024	/* Reserve at most 1024 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo *wb_obj;
	volatile uint32_t *wb;
	uint64_t gpu_addr;
	u32 num_wb;	/* Number of wb slots actually reserved for amdgpu. */
	unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
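
/*
 * Illustrative sketch only: slot allocation over the used[] bitmap above is
 * essentially a first-fit bit search.  The real amdgpu_wb_get() lives in the
 * .c file, so treat this as an approximation of the idea rather than the
 * implementation.
 *
 *	static int example_wb_get(struct amdgpu_wb *wb, u32 *slot)
 *	{
 *		unsigned long offset = find_first_zero_bit(wb->used, wb->num_wb);
 *
 *		if (offset >= wb->num_wb)
 *			return -EINVAL;		// all slots in use
 *
 *		__set_bit(offset, wb->used);	// mark the slot as taken
 *		*slot = offset;
 *		return 0;
 *	}
 */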

enum amdgpu_int_thermal_type {
	THERMAL_TYPE_NONE,
	THERMAL_TYPE_EXTERNAL,
	THERMAL_TYPE_EXTERNAL_GPIO,
	THERMAL_TYPE_RV6XX,
	THERMAL_TYPE_RV770,
	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
	THERMAL_TYPE_EVERGREEN,
	THERMAL_TYPE_SUMO,
	THERMAL_TYPE_NI,
	THERMAL_TYPE_SI,
	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
	THERMAL_TYPE_CI,
	THERMAL_TYPE_KV,
};

enum amdgpu_dpm_auto_throttle_src {
	AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
	AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
};

enum amdgpu_dpm_event_src {
	AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
	AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
	AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
	AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
};

#define AMDGPU_MAX_VCE_LEVELS 6

enum amdgpu_vce_level {
	AMDGPU_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
	AMDGPU_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
	AMDGPU_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
	AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
	AMDGPU_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
	AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
};

struct amdgpu_ps {
	u32 caps;	/* vbios flags */
	u32 class;	/* vbios flags */
	u32 class2;	/* vbios flags */
	/* UVD clocks */
	u32 vclk;
	u32 dclk;
	/* VCE clocks */
	u32 evclk;
	u32 ecclk;
	bool vce_active;
	enum amdgpu_vce_level vce_level;
	/* asic priv */
	void *ps_priv;
};

struct amdgpu_dpm_thermal {
	/* thermal interrupt work */
	struct work_struct work;
	/* low temperature threshold */
	int min_temp;
	/* high temperature threshold */
	int max_temp;
	/* was last interrupt low to high or high to low */
	bool high_to_low;
	/* interrupt source */
	struct amdgpu_irq_src irq;
};

enum amdgpu_clk_action
{
	AMDGPU_SCLK_UP = 1,
	AMDGPU_SCLK_DOWN
};

struct amdgpu_blacklist_clocks
{
	u32 sclk;
	u32 mclk;
	enum amdgpu_clk_action action;
};

struct amdgpu_clock_and_voltage_limits {
	u32 sclk;
	u32 mclk;
	u16 vddc;
	u16 vddci;
};

struct amdgpu_clock_array {
	u32 count;
	u32 *values;
};

struct amdgpu_clock_voltage_dependency_entry {
	u32 clk;
	u16 v;
};

struct amdgpu_clock_voltage_dependency_table {
	u32 count;
	struct amdgpu_clock_voltage_dependency_entry *entries;
};

union amdgpu_cac_leakage_entry {
	struct {
		u16 vddc;
		u32 leakage;
	};
	struct {
		u16 vddc1;
		u16 vddc2;
		u16 vddc3;
	};
};

struct amdgpu_cac_leakage_table {
	u32 count;
	union amdgpu_cac_leakage_entry *entries;
};

struct amdgpu_phase_shedding_limits_entry {
	u16 voltage;
	u32 sclk;
	u32 mclk;
};

struct amdgpu_phase_shedding_limits_table {
	u32 count;
	struct amdgpu_phase_shedding_limits_entry *entries;
};

struct amdgpu_uvd_clock_voltage_dependency_entry {
	u32 vclk;
	u32 dclk;
	u16 v;
};

struct amdgpu_uvd_clock_voltage_dependency_table {
	u8 count;
	struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
};

struct amdgpu_vce_clock_voltage_dependency_entry {
	u32 ecclk;
	u32 evclk;
	u16 v;
};

struct amdgpu_vce_clock_voltage_dependency_table {
	u8 count;
	struct amdgpu_vce_clock_voltage_dependency_entry *entries;
};

struct amdgpu_ppm_table {
	u8 ppm_design;
	u16 cpu_core_number;
	u32 platform_tdp;
	u32 small_ac_platform_tdp;
	u32 platform_tdc;
	u32 small_ac_platform_tdc;
	u32 apu_tdp;
	u32 dgpu_tdp;
	u32 dgpu_ulv_power;
	u32 tj_max;
};

struct amdgpu_cac_tdp_table {
	u16 tdp;
	u16 configurable_tdp;
	u16 tdc;
	u16 battery_power_limit;
	u16 small_power_limit;
	u16 low_cac_leakage;
	u16 high_cac_leakage;
	u16 maximum_power_delivery_limit;
};

struct amdgpu_dpm_dynamic_state {
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
	struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
	struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
	struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
	struct amdgpu_clock_array valid_sclk_values;
	struct amdgpu_clock_array valid_mclk_values;
	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
	u32 mclk_sclk_ratio;
	u32 sclk_mclk_delta;
	u16 vddc_vddci_delta;
	u16 min_vddc_for_pcie_gen2;
	struct amdgpu_cac_leakage_table cac_leakage_table;
	struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
	struct amdgpu_ppm_table *ppm_table;
	struct amdgpu_cac_tdp_table *cac_tdp_table;
};

struct amdgpu_dpm_fan {
	u16 t_min;
	u16 t_med;
	u16 t_high;
	u16 pwm_min;
	u16 pwm_med;
	u16 pwm_high;
	u8 t_hyst;
	u32 cycle_delay;
	u16 t_max;
	u8 control_mode;
	u16 default_max_fan_pwm;
	u16 default_fan_output_sensitivity;
	u16 fan_output_sensitivity;
	bool ucode_fan_control;
};

enum amdgpu_pcie_gen {
	AMDGPU_PCIE_GEN1 = 0,
	AMDGPU_PCIE_GEN2 = 1,
	AMDGPU_PCIE_GEN3 = 2,
	AMDGPU_PCIE_GEN_INVALID = 0xffff
};

enum amdgpu_dpm_forced_level {
	AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
	AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
	AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
	AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
};

struct amdgpu_vce_state {
	/* vce clocks */
	u32 evclk;
	u32 ecclk;
	/* gpu clocks */
	u32 sclk;
	u32 mclk;
	u8 clk_idx;
	u8 pstate;
};

struct amdgpu_dpm_funcs {
	int (*get_temperature)(struct amdgpu_device *adev);
	int (*pre_set_power_state)(struct amdgpu_device *adev);
	int (*set_power_state)(struct amdgpu_device *adev);
	void (*post_set_power_state)(struct amdgpu_device *adev);
	void (*display_configuration_changed)(struct amdgpu_device *adev);
	u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
	u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
	void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
	void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
	int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
	bool (*vblank_too_short)(struct amdgpu_device *adev);
	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
	void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
	int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
	int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
	int (*get_sclk_od)(struct amdgpu_device *adev);
	int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
	int (*get_mclk_od)(struct amdgpu_device *adev);
	int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
};

struct amdgpu_dpm {
	struct amdgpu_ps *ps;
	/* number of valid power states */
	int num_ps;
	/* current power state that is active */
	struct amdgpu_ps *current_ps;
	/* requested power state */
	struct amdgpu_ps *requested_ps;
	/* boot up power state */
	struct amdgpu_ps *boot_ps;
	/* default uvd power state */
	struct amdgpu_ps *uvd_ps;
	/* vce requirements */
	struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
	enum amdgpu_vce_level vce_level;
	enum amd_pm_state_type state;
	enum amd_pm_state_type user_state;
	u32 platform_caps;
	u32 voltage_response_time;
	u32 backbias_response_time;
	void *priv;
	u32 new_active_crtcs;
	int new_active_crtc_count;
	u32 current_active_crtcs;
	int current_active_crtc_count;
	struct amdgpu_dpm_dynamic_state dyn_state;
	struct amdgpu_dpm_fan fan;
	u32 tdp_limit;
	u32 near_tdp_limit;
	u32 near_tdp_limit_adjusted;
	u32 sq_ramping_threshold;
	u32 cac_leakage;
	u16 tdp_od_limit;
	u32 tdp_adjustment;
	u16 load_line_slope;
	bool power_control;
	bool ac_power;
	/* special states active */
	bool thermal_active;
	bool uvd_active;
	bool vce_active;
	/* thermal handling */
	struct amdgpu_dpm_thermal thermal;
	/* forced levels */
	enum amdgpu_dpm_forced_level forced_level;
};

struct amdgpu_pm {
	struct mutex mutex;
	u32 current_sclk;
	u32 current_mclk;
	u32 default_sclk;
	u32 default_mclk;
	struct amdgpu_i2c_chan *i2c_bus;
	/* internal thermal controller on rv6xx+ */
	enum amdgpu_int_thermal_type int_thermal_type;
	struct device *int_hwmon_dev;
	/* fan control parameters */
	bool no_fan;
	u8 fan_pulses_per_revolution;
	u8 fan_min_rpm;
	u8 fan_max_rpm;
	/* dpm */
	bool dpm_enabled;
	bool sysfs_initialized;
	struct amdgpu_dpm dpm;
	const struct firmware *fw;	/* SMC firmware */
	uint32_t fw_version;
	const struct amdgpu_dpm_funcs *funcs;
	uint32_t pcie_gen_mask;
	uint32_t pcie_mlw_mask;
	struct amd_pp_display_configuration pm_display_cfg;	/* set by DAL */
};

void amdgpu_get_pcie_info(struct amdgpu_device *adev);

/*
 * UVD
 */
#define AMDGPU_DEFAULT_UVD_HANDLES	10
#define AMDGPU_MAX_UVD_HANDLES		40
#define AMDGPU_UVD_STACK_SIZE		(200*1024)
#define AMDGPU_UVD_HEAP_SIZE		(256*1024)
#define AMDGPU_UVD_SESSION_SIZE		(50*1024)
#define AMDGPU_UVD_FIRMWARE_OFFSET	256

struct amdgpu_uvd {
	struct amdgpu_bo *vcpu_bo;
	void *cpu_addr;
	uint64_t gpu_addr;
	unsigned fw_version;
	void *saved_bo;
	unsigned max_handles;
	atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
	struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
	struct delayed_work idle_work;
	const struct firmware *fw;	/* UVD firmware */
	struct amdgpu_ring ring;
	struct amdgpu_irq_src irq;
	bool address_64_bit;
	bool use_ctx_buf;
	struct amd_sched_entity entity;
	uint32_t srbm_soft_reset;
};

/*
 * VCE
 */
#define AMDGPU_MAX_VCE_HANDLES		16
#define AMDGPU_VCE_FIRMWARE_OFFSET	256

#define AMDGPU_VCE_HARVEST_VCE0		(1 << 0)
#define AMDGPU_VCE_HARVEST_VCE1		(1 << 1)

struct amdgpu_vce {
	struct amdgpu_bo *vcpu_bo;
	uint64_t gpu_addr;
	unsigned fw_version;
	unsigned fb_version;
	atomic_t handles[AMDGPU_MAX_VCE_HANDLES];
	struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES];
	uint32_t img_size[AMDGPU_MAX_VCE_HANDLES];
	struct delayed_work idle_work;
	struct mutex idle_mutex;
	const struct firmware *fw;	/* VCE firmware */
	struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
	struct amdgpu_irq_src irq;
	unsigned harvest_config;
	struct amd_sched_entity entity;
	uint32_t srbm_soft_reset;
	unsigned num_rings;
};

/*
 * SDMA
 */
struct amdgpu_sdma_instance {
	/* SDMA firmware */
	const struct firmware *fw;
	uint32_t fw_version;
	uint32_t feature_version;

	struct amdgpu_ring ring;
	bool burst_nop;
};

struct amdgpu_sdma {
	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
#ifdef CONFIG_DRM_AMDGPU_SI
	/* SI DMA has a different trap irq number for the second engine */
	struct amdgpu_irq_src trap_irq_1;
#endif
	struct amdgpu_irq_src trap_irq;
	struct amdgpu_irq_src illegal_inst_irq;
	int num_instances;
	uint32_t srbm_soft_reset;
};

/*
 * Firmware
 */
struct amdgpu_firmware {
	struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
	bool smu_load;
	struct amdgpu_bo *fw_buf;
	unsigned int fw_size;
};

/*
 * Benchmarking
 */
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);


/*
 * Testing
 */
void amdgpu_test_moves(struct amdgpu_device *adev);
void amdgpu_test_ring_sync(struct amdgpu_device *adev,
			   struct amdgpu_ring *cpA,
			   struct amdgpu_ring *cpB);
void amdgpu_test_syncing(struct amdgpu_device *adev);

/*
 * MMU Notifier
 */
#if defined(CONFIG_MMU_NOTIFIER)
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
#else
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	return -ENODEV;
}
static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
#endif

/*
 * Debugfs
 */
struct amdgpu_debugfs {
	const struct drm_info_list *files;
	unsigned num_files;
};

int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles);
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);

#if defined(CONFIG_DEBUG_FS)
int amdgpu_debugfs_init(struct drm_minor *minor);
void amdgpu_debugfs_cleanup(struct drm_minor *minor);
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);

/*
 * amdgpu smumgr functions
 */
struct amdgpu_smumgr_funcs {
	int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
	int (*request_smu_load_fw)(struct amdgpu_device *adev);
	int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
};

/*
 * amdgpu smumgr
 */
struct amdgpu_smumgr {
	struct amdgpu_bo *toc_buf;
	struct amdgpu_bo *smu_buf;
	/* asic priv smu data */
	void *priv;
	spinlock_t smu_lock;
	/* smumgr functions */
	const struct amdgpu_smumgr_funcs *smumgr_funcs;
	/* ucode loading complete flag */
	uint32_t fw_flags;
};

/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {
	uint32_t reg_offset;
	bool untouched;
	bool grbm_indexed;
};

/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
	bool (*read_disabled_bios)(struct amdgpu_device *adev);
	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
				   u8 *bios, u32 length_bytes);
	void (*detect_hw_virtualization)(struct amdgpu_device *adev);
	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value);
	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
	int (*reset)(struct amdgpu_device *adev);
	/* get the reference clock */
	u32 (*get_xclk)(struct amdgpu_device *adev);
	/* MM block clocks */
	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
	/* static power management */
	int (*get_pcie_lanes)(struct amdgpu_device *adev);
	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
};

/*
 * IOCTL.

/*
 * IOCTL.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);

int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp);
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);

int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_vram_scratch {
	struct amdgpu_bo	*robj;
	volatile uint32_t	*ptr;
	u64			gpu_addr;
};

/*
 * ACPI
 */
struct amdgpu_atif_notification_cfg {
	bool enabled;
	int command_code;
};

struct amdgpu_atif_notifications {
	bool display_switch;
	bool expansion_mode_change;
	bool thermal_state;
	bool forced_power_state;
	bool system_power_state;
	bool display_conf_change;
	bool px_gfx_switch;
	bool brightness_change;
	bool dgpu_display_event;
};

struct amdgpu_atif_functions {
	bool system_params;
	bool sbios_requests;
	bool select_active_disp;
	bool lid_state;
	bool get_tv_standard;
	bool set_tv_standard;
	bool get_panel_expansion_mode;
	bool set_panel_expansion_mode;
	bool temperature_change;
	bool graphics_device_types;
};

struct amdgpu_atif {
	struct amdgpu_atif_notifications notifications;
	struct amdgpu_atif_functions functions;
	struct amdgpu_atif_notification_cfg notification_cfg;
	struct amdgpu_encoder *encoder_for_bl;
};

struct amdgpu_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
	bool pcie_dev_rdy;
	bool pcie_bus_width;
};

struct amdgpu_atcs {
	struct amdgpu_atcs_functions functions;
};

/*
 * CGS
 */
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
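
/*
 * Illustrative sketch only: the CGS device wraps adev for shared AMD
 * components (e.g. powerplay), which create and destroy it around their own
 * init/fini, roughly:
 *
 *	struct cgs_device *cgs_device = amdgpu_cgs_create_device(adev);
 *	...
 *	amdgpu_cgs_destroy_device(cgs_device);
 */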

/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);

typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);

struct amdgpu_ip_block_status {
	bool valid;
	bool sw;
	bool hw;
	bool late_initialized;
	bool hang;
};

struct amdgpu_device {
	struct device			*dev;
	struct drm_device		*ddev;
	struct pci_dev			*pdev;

#ifdef CONFIG_DRM_AMD_ACP
	struct amdgpu_acp		acp;
#endif

	/* ASIC */
	enum amd_asic_type		asic_type;
	uint32_t			family;
	uint32_t			rev_id;
	uint32_t			external_rev_id;
	unsigned long			flags;
	int				usec_timeout;
	const struct amdgpu_asic_funcs	*asic_funcs;
	bool				shutdown;
	bool				need_dma32;
	bool				accel_working;
	struct work_struct		reset_work;
	struct notifier_block		acpi_nb;
	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
	struct amdgpu_debugfs		debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
	unsigned			debugfs_count;
#if defined(CONFIG_DEBUG_FS)
	struct dentry			*debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
#endif
	struct amdgpu_atif		atif;
	struct amdgpu_atcs		atcs;
	struct mutex			srbm_mutex;
	/* GRBM index mutex. Protects concurrent access to GRBM index */
	struct mutex			grbm_idx_mutex;
	struct dev_pm_domain		vga_pm_domain;
	bool				have_disp_power_ref;

	/* BIOS */
	uint8_t				*bios;
	bool				is_atom_bios;
	struct amdgpu_bo		*stollen_vga_memory;
	uint32_t			bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

	/* Register/doorbell mmio */
	resource_size_t			rmmio_base;
	resource_size_t			rmmio_size;
	void __iomem			*rmmio;
	/* protects concurrent MM_INDEX/DATA based register access */
	spinlock_t			mmio_idx_lock;
	/* protects concurrent SMC based register access */
	spinlock_t			smc_idx_lock;
	amdgpu_rreg_t			smc_rreg;
	amdgpu_wreg_t			smc_wreg;
	/* protects concurrent PCIE register access */
	spinlock_t			pcie_idx_lock;
	amdgpu_rreg_t			pcie_rreg;
	amdgpu_wreg_t			pcie_wreg;
	amdgpu_rreg_t			pciep_rreg;
	amdgpu_wreg_t			pciep_wreg;
	/* protects concurrent UVD register access */
	spinlock_t			uvd_ctx_idx_lock;
	amdgpu_rreg_t			uvd_ctx_rreg;
	amdgpu_wreg_t			uvd_ctx_wreg;
	/* protects concurrent DIDT register access */
	spinlock_t			didt_idx_lock;
	amdgpu_rreg_t			didt_rreg;
	amdgpu_wreg_t			didt_wreg;
	/* protects concurrent gc_cac register access */
	spinlock_t			gc_cac_idx_lock;
	amdgpu_rreg_t			gc_cac_rreg;
	amdgpu_wreg_t			gc_cac_wreg;
	/* protects concurrent ENDPOINT (audio) register access */
	spinlock_t			audio_endpt_idx_lock;
	amdgpu_block_rreg_t		audio_endpt_rreg;
	amdgpu_block_wreg_t		audio_endpt_wreg;
	void __iomem			*rio_mem;
	resource_size_t			rio_mem_size;
	struct amdgpu_doorbell		doorbell;

	/* clock/pll info */
	struct amdgpu_clock		clock;

	/* MC */
	struct amdgpu_mc		mc;
	struct amdgpu_gart		gart;
	struct amdgpu_dummy_page	dummy_page;
	struct amdgpu_vm_manager	vm_manager;

	/* memory management */
	struct amdgpu_mman		mman;
	struct amdgpu_vram_scratch	vram_scratch;
	struct amdgpu_wb		wb;
	atomic64_t			vram_usage;
	atomic64_t			vram_vis_usage;
	atomic64_t			gtt_usage;
	atomic64_t			num_bytes_moved;
	atomic64_t			num_evictions;
	atomic_t			gpu_reset_counter;

	/* data for buffer migration throttling */
	struct {
		spinlock_t		lock;
		s64			last_update_us;
		s64			accum_us; /* accumulated microseconds */
		u32			log2_max_MBps;
	} mm_stats;

	/* display */
	bool				enable_virtual_display;
	struct amdgpu_mode_info		mode_info;
	struct work_struct		hotplug_work;
	struct amdgpu_irq_src		crtc_irq;
	struct amdgpu_irq_src		pageflip_irq;
	struct amdgpu_irq_src		hpd_irq;

	/* rings */
	u64				fence_context;
	unsigned			num_rings;
	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
	bool				ib_pool_ready;
	struct amdgpu_sa_manager	ring_tmp_bo;

	/* interrupts */
	struct amdgpu_irq		irq;

	/* powerplay */
	struct amd_powerplay		powerplay;
	bool				pp_enabled;
	bool				pp_force_state_enabled;

	/* dpm */
	struct amdgpu_pm		pm;
	u32				cg_flags;
	u32				pg_flags;

	/* amdgpu smumgr */
	struct amdgpu_smumgr		smu;

	/* gfx */
	struct amdgpu_gfx		gfx;

	/* sdma */
	struct amdgpu_sdma		sdma;

	/* uvd */
	struct amdgpu_uvd		uvd;

	/* vce */
	struct amdgpu_vce		vce;

	/* firmwares */
	struct amdgpu_firmware		firmware;

	/* GDS */
	struct amdgpu_gds		gds;

	const struct amdgpu_ip_block_version *ip_blocks;
	int				num_ip_blocks;
	struct amdgpu_ip_block_status	*ip_block_status;
	struct mutex			mn_lock;
	DECLARE_HASHTABLE(mn_hash, 7);

	/* tracking pinned memory */
	u64				vram_pin_size;
	u64				invisible_pin_size;
	u64				gart_pin_size;

	/* amdkfd interface */
	struct kfd_dev			*kfd;

	struct amdgpu_virtualization	virtualization;

	/* link all shadow bo */
	struct list_head		shadow_list;
	struct mutex			shadow_list_lock;
	/* link all gtt */
	spinlock_t			gtt_list_lock;
	struct list_head		gtt_list;

};

bool amdgpu_device_is_px(struct drm_device *dev);
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			bool always_indirect);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    bool always_indirect);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);

u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);

/*
 * Registers read & write functions.
 */
#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false)
#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), true)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), false))
#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), false)
#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), true)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32(reg);			\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32(reg, tmp_);				\
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
#define WREG32_PLL_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32_PLL(reg);		\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32_PLL(reg, tmp_);				\
	} while (0)
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))

#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val)			\
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field)				\
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD(reg, field, val)	\
	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))

/*
 * BIOS helpers.
 */
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
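
/*
 * Illustrative sketch only (mmFOO_CNTL and the ENABLE field are placeholder
 * names): the accessors above expect a local "struct amdgpu_device *adev" to
 * be in scope. A typical read-modify-write looks like:
 *
 *	u32 tmp = RREG32(mmFOO_CNTL);
 *	tmp = REG_SET_FIELD(tmp, FOO_CNTL, ENABLE, 1);
 *	WREG32(mmFOO_CNTL, tmp);
 *
 * which WREG32_FIELD(FOO_CNTL, ENABLE, 1) collapses into a single statement.
 * In WREG32_P(reg, val, mask) the "mask" selects the bits to preserve from
 * the current register value while the remaining bits are taken from "val";
 * WREG32_AND() and WREG32_OR() are built on top of that. The *_IDX variants
 * force the access through the indirect MM_INDEX/MM_DATA path.
 */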

/*
 * RING helpers.
 */
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
	if (ring->count_dw <= 0)
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
}

static inline struct amdgpu_sdma_instance *
amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		if (&adev->sdma.instance[i].ring == ring)
			break;

	if (i < AMDGPU_MAX_SDMA_INSTANCES)
		return &adev->sdma.instance[i];
	else
		return NULL;
}
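
/*
 * Illustrative sketch only (the variables and PACKET0 below are placeholders;
 * amdgpu_ring_alloc()/amdgpu_ring_commit() are the usual ring helpers
 * implemented elsewhere in the driver): amdgpu_ring_write() is used between
 * allocating ring space and committing it, e.g.:
 *
 *	r = amdgpu_ring_alloc(ring, 3);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, PACKET0(reg, 0));
 *	amdgpu_ring_write(ring, lower_32_bits(val));
 *	amdgpu_ring_write(ring, upper_32_bits(val));
 *	amdgpu_ring_commit(ring);
 *
 * count_dw is decremented on each write, so writing more dwords than were
 * allocated triggers the DRM_ERROR() above.
 */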

/*
 * ASICs macro.
 */
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_detect_hw_virtualization(adev) (adev)->asic_funcs->detect_hw_virtualization((adev))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
#define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))

#define amdgpu_dpm_read_sensor(adev, idx, value) \
	((adev)->pp_enabled ? \
		(adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
		-EINVAL)

#define amdgpu_dpm_get_temperature(adev) \
	((adev)->pp_enabled ? \
		(adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
		(adev)->pm.funcs->get_temperature((adev)))

#define amdgpu_dpm_set_fan_control_mode(adev, m) \
	((adev)->pp_enabled ? \
		(adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
		(adev)->pm.funcs->set_fan_control_mode((adev), (m)))

#define amdgpu_dpm_get_fan_control_mode(adev) \
	((adev)->pp_enabled ? \
		(adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
		(adev)->pm.funcs->get_fan_control_mode((adev)))

#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
	((adev)->pp_enabled ? \
		(adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
		(adev)->pm.funcs->set_fan_speed_percent((adev), (s)))

#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
	((adev)->pp_enabled ? \
		(adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
		(adev)->pm.funcs->get_fan_speed_percent((adev), (s)))

#define amdgpu_dpm_get_sclk(adev, l) \
	((adev)->pp_enabled ? \
		(adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
		(adev)->pm.funcs->get_sclk((adev), (l)))

#define amdgpu_dpm_get_mclk(adev, l) \
	((adev)->pp_enabled ? \
		(adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
		(adev)->pm.funcs->get_mclk((adev), (l)))


#define amdgpu_dpm_force_performance_level(adev, l) \
	((adev)->pp_enabled ? \
		(adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
		(adev)->pm.funcs->force_performance_level((adev), (l)))

#define amdgpu_dpm_powergate_uvd(adev, g) \
	((adev)->pp_enabled ? \
		(adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
		(adev)->pm.funcs->powergate_uvd((adev), (g)))

#define amdgpu_dpm_powergate_vce(adev, g) \
	((adev)->pp_enabled ? \
		(adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
		(adev)->pm.funcs->powergate_vce((adev), (g)))

#define amdgpu_dpm_get_current_power_state(adev) \
	(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)

#define amdgpu_dpm_get_performance_level(adev) \
	(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)

#define amdgpu_dpm_get_pp_num_states(adev, data) \
	(adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)

#define amdgpu_dpm_get_pp_table(adev, table) \
	(adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)

#define amdgpu_dpm_set_pp_table(adev, buf, size) \
	(adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)

#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
	(adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)

#define amdgpu_dpm_force_clock_level(adev, type, level) \
	(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)

#define amdgpu_dpm_get_sclk_od(adev) \
	(adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)

#define amdgpu_dpm_set_sclk_od(adev, value) \
	(adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)

#define amdgpu_dpm_get_mclk_od(adev) \
	((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))

#define amdgpu_dpm_set_mclk_od(adev, value) \
	((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))

#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
	(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))

#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
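
/*
 * Note on the amdgpu_dpm_*() wrappers above: when the powerplay component is
 * in use (adev->pp_enabled) the call is routed to the powerplay function
 * table via adev->powerplay.pp_handle, otherwise the legacy per-ASIC dpm
 * implementation behind adev->pm.funcs is used; amdgpu_dpm_read_sensor(),
 * for instance, simply returns -EINVAL without powerplay, and some wrappers
 * have no legacy fallback at all. Illustrative sketch only ("level" is a
 * placeholder):
 *
 *	if (amdgpu_dpm_force_performance_level(adev, level))
 *		DRM_ERROR("forcing performance level failed\n");
 */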

/* Common functions */
int amdgpu_gpu_reset(struct amdgpu_device *adev);
bool amdgpu_need_backup(struct amdgpu_device *adev);
void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_card_posted(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);

int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
		       u32 ip_instance, u32 ring,
		       struct amdgpu_ring **out_ring);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem);
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev);
int amdgpu_ttm_global_init(struct amdgpu_device *adev);
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size);

bool amdgpu_device_is_px(struct drm_device *dev);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
bool amdgpu_atpx_dgpu_req_power_for_displays(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
#endif

/*
 * KMS
 */
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;

int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
int amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
void amdgpu_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv);
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg);

/*
 * functions used by amdgpu_encoder.c
 */
struct amdgpu_afmt_acr {
	u32 clock;

	int n_32khz;
	int cts_32khz;

	int n_44_1khz;
	int cts_44_1khz;

	int n_48khz;
	int cts_48khz;

};

struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);

/* amdgpu_acpi.c */
#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
					 u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif

struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
		       uint64_t addr, struct amdgpu_bo **bo);
int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);

#include "amdgpu_object.h"
#endif