/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include "amdgpu_ctx.h"

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/rbtree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/gpu_scheduler.h>

#include <kgd_kfd_interface.h>
#include "dm_pp_interface.h"
#include "kgd_pp_interface.h"

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_psp.h"
#include "amdgpu_gds.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_vm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_mn.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gfx.h"
#include "amdgpu_sdma.h"
#include "amdgpu_nbio.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_csa.h"
#include "amdgpu_gart.h"
#include "amdgpu_debugfs.h"
#include "amdgpu_job.h"
#include "amdgpu_bo_list.h"
#include "amdgpu_gem.h"
#include "amdgpu_doorbell.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_smu.h"
#include "amdgpu_discovery.h"
#include "amdgpu_mes.h"
#include "amdgpu_umc.h"
#include "amdgpu_mmhub.h"
#include "amdgpu_df.h"

#define MAX_GPU_INSTANCE		16

struct amdgpu_gpu_instance
{
	struct amdgpu_device		*adev;
	int				mgpu_fan_enabled;
};

struct amdgpu_mgpu_info
{
	struct amdgpu_gpu_instance	gpu_ins[MAX_GPU_INSTANCE];
	struct mutex			mutex;
	uint32_t			num_gpu;
	uint32_t			num_dgpu;
	uint32_t			num_apu;
};

#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH	256

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_vis_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_gtt_size;
extern int amdgpu_moverate;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern uint amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
extern int amdgpu_exp_hw_support;
extern int amdgpu_dc;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern uint amdgpu_pcie_gen_cap;
extern uint amdgpu_pcie_lane_cap;
extern uint amdgpu_cg_mask;
extern uint amdgpu_pg_mask;
extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern uint amdgpu_pp_feature_mask;
extern uint amdgpu_force_long_training;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern uint amdgpu_dc_feature_mask;
extern uint amdgpu_dm_abm_level;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;
extern int amdgpu_async_gfx_ring;
extern int amdgpu_mcbp;
extern int amdgpu_discovery;
extern int amdgpu_mes;
extern int amdgpu_noretry;
extern int amdgpu_force_asic_type;
#ifdef CONFIG_HSA_AMD
extern int sched_policy;
#else
static const int sched_policy = KFD_SCHED_POLICY_HWS;
#endif

#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
extern int amdgpu_cik_support;
#endif

#define AMDGPU_VM_MAX_NUM_CTX			4096
#define AMDGPU_SG_THRESHOLD			(256*1024*1024)
#define AMDGPU_DEFAULT_GTT_SIZE_MB		3072ULL	/* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS		3000
#define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE			16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS		32
#define AMDGPUFB_CONN_LIMIT			4
#define AMDGPU_BIOS_NUM_SCRATCH			16

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA			0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX			(1 << 0)
#define AMDGPU_RESET_COMPUTE			(1 << 1)
#define AMDGPU_RESET_DMA			(1 << 2)
#define AMDGPU_RESET_CP				(1 << 3)
#define AMDGPU_RESET_GRBM			(1 << 4)
#define AMDGPU_RESET_DMA1			(1 << 5)
#define AMDGPU_RESET_RLC			(1 << 6)
#define AMDGPU_RESET_SEM			(1 << 7)
#define AMDGPU_RESET_IH				(1 << 8)
#define AMDGPU_RESET_VMC			(1 << 9)
#define AMDGPU_RESET_MC				(1 << 10)
#define AMDGPU_RESET_DISPLAY			(1 << 11)
#define AMDGPU_RESET_UVD			(1 << 12)
#define AMDGPU_RESET_VCE			(1 << 13)
#define AMDGPU_RESET_VCE1			(1 << 14)
/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128

struct amdgpu_device;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
struct amdgpu_atif;
struct kfd_vm_fault_info;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
	AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

enum amdgpu_kiq_irq {
	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
	AMDGPU_CP_KIQ_IRQ_LAST
};

#define MAX_KIQ_REG_WAIT		5000	/* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL	5	/* in msecs, 5ms */
#define MAX_KIQ_REG_TRY			80	/* 20 -> 80 */

int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state);
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state);
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type);
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type);

#define AMDGPU_MAX_IP_NUM 16

struct amdgpu_ip_block_status {
	bool valid;
	bool sw;
	bool hw;
	bool late_initialized;
	bool hang;
};

struct amdgpu_ip_block_version {
	const enum amd_ip_block_type type;
	const u32 major;
	const u32 minor;
	const u32 rev;
	const struct amd_ip_funcs *funcs;
};

#define HW_REV(_Major, _Minor, _Rev) \
	((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev)))

struct amdgpu_ip_block {
	struct amdgpu_ip_block_status status;
	const struct amdgpu_ip_block_version *version;
};

int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor);

struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type);

int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version);

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Clocks
 */

#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 Khz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};
/* Sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver,
 * like the indirect buffer or semaphore, which both have their
 * own locking.
 *
 * The principle is simple: we keep a list of sub-allocations in
 * offset order (the first entry has offset == 0, the last entry
 * has the highest offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so, we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting on
 * each sub-object until we reach object_offset + object_size >=
 * alloc_size; that object then becomes the sub-object we return.
 *
 * Alignment can't be bigger than page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */

#define AMDGPU_SA_NUM_FENCE_LISTS	32

struct amdgpu_sa_manager {
	wait_queue_head_t	wq;
	struct amdgpu_bo	*bo;
	struct list_head	*hole;
	struct list_head	flist[AMDGPU_SA_NUM_FENCE_LISTS];
	struct list_head	olist;
	unsigned		size;
	uint64_t		gpu_addr;
	void			*cpu_ptr;
	uint32_t		domain;
	uint32_t		align;
};

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head		olist;
	struct list_head		flist;
	struct amdgpu_sa_manager	*manager;
	unsigned			soffset;
	unsigned			eoffset;
	struct dma_fence		*fence;
};

int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);

/*
 * IRQS.
 */

struct amdgpu_flip_work {
	struct delayed_work		flip_work;
	struct work_struct		unpin_work;
	struct amdgpu_device		*adev;
	int				crtc_id;
	u32				target_vblank;
	uint64_t			base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo		*old_abo;
	struct dma_fence		*excl;
	unsigned			shared_count;
	struct dma_fence		**shared;
	struct dma_fence_cb		cb;
	bool				async;
};


/*
 * CP & rings.
 */

struct amdgpu_ib {
	struct amdgpu_sa_bo	*sa_bo;
	uint32_t		length_dw;
	uint64_t		gpu_addr;
	uint32_t		*ptr;
	uint32_t		flags;
};

extern const struct drm_sched_backend_ops amdgpu_sched_ops;

/*
 * file private structure
 */

struct amdgpu_fpriv {
	struct amdgpu_vm	vm;
	struct amdgpu_bo_va	*prt_va;
	struct amdgpu_bo_va	*csa_va;
	struct mutex		bo_list_lock;
	struct idr		bo_list_handles;
	struct amdgpu_ctx_mgr	ctx_mgr;
};

int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
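
/* Illustrative sketch only (not part of the API contract of this header):
 * a typical indirect buffer (IB) lifecycle built from the helpers declared
 * above. The ring pointer and the packet contents are assumed to come from
 * the caller's context.
 *
 *	struct amdgpu_ib ib;
 *	struct dma_fence *f = NULL;
 *	int r;
 *
 *	r = amdgpu_ib_get(adev, NULL, 256, &ib);	// backed by the SA manager
 *	if (r)
 *		return r;
 *	ib.ptr[0] = ...;				// emit packets
 *	ib.length_dw = ...;
 *	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);	// submit directly to the ring
 *	if (!r)
 *		r = dma_fence_wait(f, false);
 *	amdgpu_ib_free(adev, &ib, f);
 *	dma_fence_put(f);
 */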
/*
 * CS.
 */
struct amdgpu_cs_chunk {
	uint32_t	chunk_id;
	uint32_t	length_dw;
	void		*kdata;
};

struct amdgpu_cs_post_dep {
	struct drm_syncobj *syncobj;
	struct dma_fence_chain *chain;
	u64 point;
};

struct amdgpu_cs_parser {
	struct amdgpu_device	*adev;
	struct drm_file		*filp;
	struct amdgpu_ctx	*ctx;

	/* chunks */
	unsigned		nchunks;
	struct amdgpu_cs_chunk	*chunks;

	/* scheduler job object */
	struct amdgpu_job	*job;
	struct drm_sched_entity	*entity;

	/* buffer objects */
	struct ww_acquire_ctx		ticket;
	struct amdgpu_bo_list		*bo_list;
	struct amdgpu_mn		*mn;
	struct amdgpu_bo_list_entry	vm_pd;
	struct list_head		validated;
	struct dma_fence		*fence;
	uint64_t			bytes_moved_threshold;
	uint64_t			bytes_moved_vis_threshold;
	uint64_t			bytes_moved;
	uint64_t			bytes_moved_vis;

	/* user fence */
	struct amdgpu_bo_list_entry	uf_entry;

	unsigned			num_post_deps;
	struct amdgpu_cs_post_dep	*post_deps;
};

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
				      uint32_t ib_idx, int idx)
{
	return p->job->ibs[ib_idx].ptr[idx];
}

static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
				       uint32_t ib_idx, int idx,
				       uint32_t value)
{
	p->job->ibs[ib_idx].ptr[idx] = value;
}

/*
 * Writeback
 */
#define AMDGPU_MAX_WB 128	/* Reserve at most 128 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo	*wb_obj;
	volatile uint32_t	*wb;
	uint64_t		gpu_addr;
	u32			num_wb;	/* Number of wb slots actually reserved for amdgpu. */
	unsigned long		used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);

/*
 * Benchmarking
 */
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);


/*
 * Testing
 */
void amdgpu_test_moves(struct amdgpu_device *adev);

/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {
	uint32_t reg_offset;
	bool grbm_indexed;
};

enum amd_reset_method {
	AMD_RESET_METHOD_LEGACY = 0,
	AMD_RESET_METHOD_MODE0,
	AMD_RESET_METHOD_MODE1,
	AMD_RESET_METHOD_MODE2,
	AMD_RESET_METHOD_BACO
};
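
/* Illustrative sketch only: how the writeback (WB) helpers declared in the
 * Writeback section above are typically used. A slot index is reserved with
 * amdgpu_device_wb_get(); the engine is programmed with the slot's GPU
 * address, while the CPU reads the mapped copy.
 *
 *	u32 wb;
 *
 *	if (amdgpu_device_wb_get(adev, &wb) == 0) {
 *		u64 wb_gpu_addr = adev->wb.gpu_addr + (wb * 4);	// handed to the engine
 *		u32 val = adev->wb.wb[wb];			// CPU-visible copy
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */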
/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
	bool (*read_disabled_bios)(struct amdgpu_device *adev);
	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
				   u8 *bios, u32 length_bytes);
	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value);
	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
	int (*reset)(struct amdgpu_device *adev);
	enum amd_reset_method (*reset_method)(struct amdgpu_device *adev);
	/* get the reference clock */
	u32 (*get_xclk)(struct amdgpu_device *adev);
	/* MM block clocks */
	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
	/* static power management */
	int (*get_pcie_lanes)(struct amdgpu_device *adev);
	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
	/* get config memsize register */
	u32 (*get_config_memsize)(struct amdgpu_device *adev);
	/* flush hdp write queue */
	void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
	/* invalidate hdp read cache */
	void (*invalidate_hdp)(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring);
	/* check if the asic needs a full reset or if a soft reset will work */
	bool (*need_full_reset)(struct amdgpu_device *adev);
	/* initialize doorbell layout for specific asic */
	void (*init_doorbell_index)(struct amdgpu_device *adev);
	/* PCIe bandwidth usage */
	void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0,
			       uint64_t *count1);
	/* do we need to reset the asic at init time (e.g., kexec) */
	bool (*need_reset_on_init)(struct amdgpu_device *adev);
	/* PCIe replay counter */
	uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
	/* device supports BACO */
	bool (*supports_baco)(struct amdgpu_device *adev);
};

/*
 * IOCTL.
 */
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_vram_scratch {
	struct amdgpu_bo	*robj;
	volatile uint32_t	*ptr;
	u64			gpu_addr;
};

/*
 * ACPI
 */
struct amdgpu_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
	bool pcie_dev_rdy;
	bool pcie_bus_width;
};

struct amdgpu_atcs {
	struct amdgpu_atcs_functions functions;
};

/*
 * Firmware VRAM reservation
 */
struct amdgpu_fw_vram_usage {
	u64 start_offset;
	u64 size;
	struct amdgpu_bo *reserved_bo;
	void *va;

	/* GDDR6 training support flag.
	 */
	bool mem_train_support;
};

/*
 * CGS
 */
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);

typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);

typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);

struct amdgpu_mmio_remap {
	u32 reg_offset;
	resource_size_t bus_addr;
};

/* Define the HW IP blocks used in the driver; add more if necessary */
enum amd_hw_ip_block_type {
	GC_HWIP = 1,
	HDP_HWIP,
	SDMA0_HWIP,
	SDMA1_HWIP,
	SDMA2_HWIP,
	SDMA3_HWIP,
	SDMA4_HWIP,
	SDMA5_HWIP,
	SDMA6_HWIP,
	SDMA7_HWIP,
	MMHUB_HWIP,
	ATHUB_HWIP,
	NBIO_HWIP,
	MP0_HWIP,
	MP1_HWIP,
	UVD_HWIP,
	VCN_HWIP = UVD_HWIP,
	JPEG_HWIP = VCN_HWIP,
	VCE_HWIP,
	DF_HWIP,
	DCE_HWIP,
	OSSSYS_HWIP,
	SMUIO_HWIP,
	PWR_HWIP,
	NBIF_HWIP,
	THM_HWIP,
	CLK_HWIP,
	UMC_HWIP,
	RSMU_HWIP,
	MAX_HWIP
};

#define HWIP_MAX_INSTANCE	8

struct amd_powerplay {
	void *pp_handle;
	const struct amd_pm_funcs *pp_funcs;
};

#define AMDGPU_RESET_MAGIC_NUM 64
#define AMDGPU_MAX_DF_PERFMONS 4
struct amdgpu_device {
	struct device			*dev;
	struct drm_device		*ddev;
	struct pci_dev			*pdev;

#ifdef CONFIG_DRM_AMD_ACP
	struct amdgpu_acp		acp;
#endif

	/* ASIC */
	enum amd_asic_type		asic_type;
	uint32_t			family;
	uint32_t			rev_id;
	uint32_t			external_rev_id;
	unsigned long			flags;
	int				usec_timeout;
	const struct amdgpu_asic_funcs	*asic_funcs;
	bool				shutdown;
	bool				need_swiotlb;
	bool				accel_working;
	struct notifier_block		acpi_nb;
	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
	struct amdgpu_debugfs		debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
	unsigned			debugfs_count;
#if defined(CONFIG_DEBUG_FS)
	struct dentry			*debugfs_preempt;
	struct dentry			*debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
#endif
	struct amdgpu_atif		*atif;
	struct amdgpu_atcs		atcs;
	struct mutex			srbm_mutex;
	/* GRBM index mutex. Protects concurrent access to GRBM index */
	struct mutex			grbm_idx_mutex;
	struct dev_pm_domain		vga_pm_domain;
	bool				have_disp_power_ref;
	bool				have_atomics_support;

	/* BIOS */
	bool				is_atom_fw;
	uint8_t				*bios;
	uint32_t			bios_size;
	struct amdgpu_bo		*stolen_vga_memory;
	struct amdgpu_bo		*discovery_memory;
	uint32_t			bios_scratch_reg_offset;
	uint32_t			bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

	/* Register/doorbell mmio */
	resource_size_t			rmmio_base;
	resource_size_t			rmmio_size;
	void __iomem			*rmmio;
	/* protects concurrent MM_INDEX/DATA based register access */
	spinlock_t			mmio_idx_lock;
	struct amdgpu_mmio_remap	rmmio_remap;
	/* protects concurrent SMC based register access */
	spinlock_t			smc_idx_lock;
	amdgpu_rreg_t			smc_rreg;
	amdgpu_wreg_t			smc_wreg;
	/* protects concurrent PCIE register access */
	spinlock_t			pcie_idx_lock;
	amdgpu_rreg_t			pcie_rreg;
	amdgpu_wreg_t			pcie_wreg;
	amdgpu_rreg_t			pciep_rreg;
	amdgpu_wreg_t			pciep_wreg;
	amdgpu_rreg64_t			pcie_rreg64;
	amdgpu_wreg64_t			pcie_wreg64;
	/* protects concurrent UVD register access */
	spinlock_t			uvd_ctx_idx_lock;
	amdgpu_rreg_t			uvd_ctx_rreg;
	amdgpu_wreg_t			uvd_ctx_wreg;
	/* protects concurrent DIDT register access */
	spinlock_t			didt_idx_lock;
	amdgpu_rreg_t			didt_rreg;
	amdgpu_wreg_t			didt_wreg;
	/* protects concurrent gc_cac register access */
	spinlock_t			gc_cac_idx_lock;
	amdgpu_rreg_t			gc_cac_rreg;
	amdgpu_wreg_t			gc_cac_wreg;
	/* protects concurrent se_cac register access */
	spinlock_t			se_cac_idx_lock;
	amdgpu_rreg_t			se_cac_rreg;
	amdgpu_wreg_t			se_cac_wreg;
	/* protects concurrent ENDPOINT (audio) register access */
	spinlock_t			audio_endpt_idx_lock;
	amdgpu_block_rreg_t		audio_endpt_rreg;
	amdgpu_block_wreg_t		audio_endpt_wreg;
	void __iomem			*rio_mem;
	resource_size_t			rio_mem_size;
	struct amdgpu_doorbell		doorbell;

	/* clock/pll info */
	struct amdgpu_clock		clock;

	/* MC */
	struct amdgpu_gmc		gmc;
	struct amdgpu_gart		gart;
	dma_addr_t			dummy_page_addr;
	struct amdgpu_vm_manager	vm_manager;
	struct amdgpu_vmhub		vmhub[AMDGPU_MAX_VMHUBS];
	unsigned			num_vmhubs;

	/* memory management */
	struct amdgpu_mman		mman;
	struct amdgpu_vram_scratch	vram_scratch;
	struct amdgpu_wb		wb;
	atomic64_t			num_bytes_moved;
	atomic64_t			num_evictions;
	atomic64_t			num_vram_cpu_page_faults;
	atomic_t			gpu_reset_counter;
	atomic_t			vram_lost_counter;

	/* data for buffer migration throttling */
	struct {
		spinlock_t		lock;
		s64			last_update_us;
		s64			accum_us; /* accumulated microseconds */
		s64			accum_us_vis; /* for visible VRAM */
		u32			log2_max_MBps;
	} mm_stats;

	/* display */
	bool				enable_virtual_display;
	struct amdgpu_mode_info		mode_info;
	/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
	struct work_struct		hotplug_work;
	struct amdgpu_irq_src		crtc_irq;
	struct amdgpu_irq_src		vupdate_irq;
	struct amdgpu_irq_src		pageflip_irq;
	struct amdgpu_irq_src		hpd_irq;

	/* rings */
	u64				fence_context;
	unsigned			num_rings;
	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
	bool				ib_pool_ready;
	struct amdgpu_sa_manager	ring_tmp_bo;

	/* interrupts */
	struct amdgpu_irq		irq;

	/* powerplay */
	struct amd_powerplay		powerplay;
	bool				pp_force_state_enabled;

	/* smu */
	struct smu_context		smu;

	/* dpm */
	struct amdgpu_pm		pm;
	u32				cg_flags;
	u32				pg_flags;

	/* nbio */
	struct amdgpu_nbio		nbio;

	/* mmhub */
	struct amdgpu_mmhub		mmhub;

	/* gfx */
	struct amdgpu_gfx		gfx;

	/* sdma */
	struct amdgpu_sdma		sdma;

	/* uvd */
	struct amdgpu_uvd		uvd;

	/* vce */
	struct amdgpu_vce		vce;

	/* vcn */
	struct amdgpu_vcn		vcn;

	/* jpeg */
	struct amdgpu_jpeg		jpeg;

	/* firmwares */
	struct amdgpu_firmware		firmware;

	/* PSP */
	struct psp_context		psp;

	/* GDS */
	struct amdgpu_gds		gds;

	/* KFD */
	struct amdgpu_kfd_dev		kfd;

	/* UMC */
	struct amdgpu_umc		umc;

	/* display related functionality */
	struct amdgpu_display_manager	dm;

	/* discovery */
	uint8_t				*discovery;

	/* mes */
	bool				enable_mes;
	struct amdgpu_mes		mes;

	/* df */
	struct amdgpu_df		df;

	struct amdgpu_ip_block		ip_blocks[AMDGPU_MAX_IP_NUM];
	int				num_ip_blocks;
	struct mutex			mn_lock;
	DECLARE_HASHTABLE(mn_hash, 7);

	/* tracking pinned memory */
	atomic64_t			vram_pin_size;
	atomic64_t			visible_pin_size;
	atomic64_t			gart_pin_size;

	/* soc15 register offset based on ip, instance and segment */
	uint32_t			*reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];

	/* delayed work_func for deferring clockgating during resume */
	struct delayed_work		delayed_init_work;

	struct amdgpu_virt		virt;
	/* firmware VRAM reservation */
	struct amdgpu_fw_vram_usage	fw_vram_usage;

	/* link all shadow bo */
	struct list_head		shadow_list;
	struct mutex			shadow_list_lock;
	/* keep an lru list of rings by HW IP */
	struct list_head		ring_lru_list;
	spinlock_t			ring_lru_list_lock;

	/* record hw reset is performed */
	bool				has_hw_reset;
	u8				reset_magic[AMDGPU_RESET_MAGIC_NUM];

	/* s3/s4 mask */
	bool				in_suspend;

	/* record last mm index being written through WREG32 */
	unsigned long			last_mm_index;
	bool				in_gpu_reset;
	enum pp_mp1_state		mp1_state;
	struct mutex			lock_reset;
	struct amdgpu_doorbell_index	doorbell_index;

	struct mutex			notifier_lock;

	int				asic_reset_res;
	struct work_struct		xgmi_reset_work;

	long				gfx_timeout;
	long				sdma_timeout;
	long				video_timeout;
	long				compute_timeout;

	uint64_t			unique_id;
	uint64_t	df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];

	/* device pstate */
	int				pstate;
	/* enable runtime pm on the device */
	bool				runpm;

	bool				pm_sysfs_en;
	bool				ucode_sysfs_en;
};

static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
{
	return container_of(bdev, struct amdgpu_device, mman.bdev);
}

int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       uint32_t *buf, size_t size, bool write);
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);

bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);

int emu_soc_asic_init(struct amdgpu_device *adev);

/*
 * Registers read & write functions.
 */

#define AMDGPU_REGS_IDX		(1<<0)
#define AMDGPU_REGS_NO_KIQ	(1<<1)
#define AMDGPU_REGS_KIQ	(1<<2)

#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)

#define RREG32_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_KIQ)
#define WREG32_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_KIQ)

#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))

#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
#define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg))
#define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32(reg);			\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32(reg, tmp_);				\
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
#define WREG32_PLL_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32_PLL(reg);		\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32_PLL(reg, tmp_);				\
	} while (0)
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val)			\
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field)				\
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD(reg, field, val)	\
	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD_OFFSET(reg, offset, field, val)	\
	WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))

/*
 * BIOS helpers.
 */
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))

/*
 * ASICs macro.
 */
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
#define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
#define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
#define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev))
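
/* Illustrative sketch only: the usual read-modify-write pattern built from the
 * register field helpers defined above. GRBM_CNTL/READ_TIMEOUT stand in for
 * any register/field pair from the generated register headers, which provide
 * the <reg>__<field>__SHIFT and <reg>__<field>_MASK definitions these macros
 * expand to.
 *
 *	u32 tmp = RREG32(mmGRBM_CNTL);
 *	tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
 *	WREG32(mmGRBM_CNTL, tmp);
 *
 * or, as a single statement:
 *
 *	WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xff);
 */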
#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));

/* Common functions */
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job);
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);

void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes);
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size);

bool amdgpu_device_supports_boco(struct drm_device *dev);
bool amdgpu_device_supports_baco(struct drm_device *dev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
				      struct amdgpu_device *peer_adev);
int amdgpu_device_baco_enter(struct drm_device *dev);
int amdgpu_device_baco_exit(struct drm_device *dev);

/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
bool amdgpu_atpx_dgpu_req_power_for_displays(void);
bool amdgpu_has_atpx(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
static inline bool amdgpu_has_atpx(void) { return false; }
#endif

#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
void *amdgpu_atpx_get_dhandle(void);
#else
static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
#endif

/*
 * KMS
 */
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;

int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg);

/*
 * functions used by amdgpu_encoder.c
 */
struct amdgpu_afmt_acr {
	u32 clock;

	int n_32khz;
	int cts_32khz;

	int n_44_1khz;
	int cts_44_1khz;

	int n_48khz;
	int cts_48khz;

};

struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
/* amdgpu_acpi.c */
#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
					 u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);

void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
				    struct amdgpu_dm_backlight_caps *caps);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif

int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **mapping);

#if defined(CONFIG_DRM_AMD_DC)
int amdgpu_dm_display_resume(struct amdgpu_device *adev);
#else
static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
#endif


void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);

#include "amdgpu_object.h"

/* used by df_v3_6.c and amdgpu_pmu.c */
#define AMDGPU_PMU_ATTR(_name, _object)					\
static ssize_t								\
_name##_show(struct device *dev,					\
	     struct device_attribute *attr,				\
	     char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_object) >= PAGE_SIZE - 1);			\
	return sprintf(page, _object "\n");				\
}									\
									\
static struct device_attribute pmu_attr_##_name = __ATTR_RO(_name)

#endif