/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) "amdgpu: " fmt

#ifdef dev_fmt
#undef dev_fmt
#endif

#define dev_fmt(fmt) "amdgpu: " fmt

#include "amdgpu_ctx.h"

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/rbtree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/gpu_scheduler.h>

#include <kgd_kfd_interface.h>
#include "dm_pp_interface.h"
#include "kgd_pp_interface.h"

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_psp.h"
#include "amdgpu_gds.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_vm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_mn.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gfx.h"
#include "amdgpu_sdma.h"
#include "amdgpu_nbio.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_csa.h"
#include "amdgpu_gart.h"
#include "amdgpu_debugfs.h"
#include "amdgpu_job.h"
#include "amdgpu_bo_list.h"
#include "amdgpu_gem.h"
#include "amdgpu_doorbell.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_smu.h"
#include "amdgpu_discovery.h"
#include "amdgpu_mes.h"
#include "amdgpu_umc.h"
#include "amdgpu_mmhub.h"
#include "amdgpu_df.h"

#define MAX_GPU_INSTANCE	16

struct amdgpu_gpu_instance {
	struct amdgpu_device	*adev;
	int			mgpu_fan_enabled;
};

struct amdgpu_mgpu_info {
	struct amdgpu_gpu_instance	gpu_ins[MAX_GPU_INSTANCE];
	struct mutex			mutex;
	uint32_t			num_gpu;
	uint32_t			num_dgpu;
	uint32_t			num_apu;
};

#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH	256

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_vis_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_gtt_size;
extern int amdgpu_moverate;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern uint amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
extern int amdgpu_exp_hw_support;
extern int amdgpu_dc;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern uint amdgpu_pcie_gen_cap;
extern uint amdgpu_pcie_lane_cap;
extern uint amdgpu_cg_mask;
extern uint amdgpu_pg_mask;
extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern uint amdgpu_pp_feature_mask;
extern uint amdgpu_force_long_training;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern uint amdgpu_dc_feature_mask;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dm_abm_level;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;
extern int amdgpu_bad_page_threshold;
extern int amdgpu_async_gfx_ring;
extern int amdgpu_mcbp;
extern int amdgpu_discovery;
extern int amdgpu_mes;
extern int amdgpu_noretry;
extern int amdgpu_force_asic_type;
#ifdef CONFIG_HSA_AMD
extern int sched_policy;
extern bool debug_evictions;
extern bool no_system_mem_limit;
#else
static const int sched_policy = KFD_SCHED_POLICY_HWS;
static const bool debug_evictions; /* = false */
static const bool no_system_mem_limit;
#endif

extern int amdgpu_tmz;
extern int amdgpu_reset_method;

#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
extern int amdgpu_cik_support;
#endif
extern int amdgpu_num_kcq;

#define AMDGPU_VM_MAX_NUM_CTX		4096
#define AMDGPU_SG_THRESHOLD		(256*1024*1024)
#define AMDGPU_DEFAULT_GTT_SIZE_MB	3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	3000
#define AMDGPU_MAX_USEC_TIMEOUT		100000	/* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT	(HZ / 2)
#define AMDGPU_DEBUGFS_MAX_COMPONENTS	32
#define AMDGPUFB_CONN_LIMIT		4
#define AMDGPU_BIOS_NUM_SCRATCH		16

#define AMDGPU_VBIOS_VGA_ALLOCATION	(9 * 1024 * 1024) /* reserve 8MB for vga emulator and 1 MB for FB */

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA		0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX		(1 << 0)
#define AMDGPU_RESET_COMPUTE		(1 << 1)
#define AMDGPU_RESET_DMA		(1 << 2)
#define AMDGPU_RESET_CP			(1 << 3)
#define AMDGPU_RESET_GRBM		(1 << 4)
#define AMDGPU_RESET_DMA1		(1 << 5)
#define AMDGPU_RESET_RLC		(1 << 6)
#define AMDGPU_RESET_SEM		(1 << 7)
#define AMDGPU_RESET_IH			(1 << 8)
#define AMDGPU_RESET_VMC		(1 << 9)
#define AMDGPU_RESET_MC			(1 << 10)
#define AMDGPU_RESET_DISPLAY		(1 << 11)
#define AMDGPU_RESET_UVD		(1 << 12)
#define AMDGPU_RESET_VCE		(1 << 13)
#define AMDGPU_RESET_VCE1		(1 << 14)

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH	128
#define CIK_CURSOR_HEIGHT	128

struct amdgpu_device;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
struct amdgpu_atif;
struct kfd_vm_fault_info;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
	AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

enum amdgpu_kiq_irq {
	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
	AMDGPU_CP_KIQ_IRQ_LAST
};

#define MAX_KIQ_REG_WAIT		5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL	5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY			80 /* 20 -> 80 */

int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state);
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state);
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type);
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type);

#define AMDGPU_MAX_IP_NUM 16

struct amdgpu_ip_block_status {
	bool valid;
	bool sw;
	bool hw;
	bool late_initialized;
	bool hang;
};

struct amdgpu_ip_block_version {
	const enum amd_ip_block_type type;
	const u32 major;
	const u32 minor;
	const u32 rev;
	const struct amd_ip_funcs *funcs;
};

#define HW_REV(_Major, _Minor, _Rev) \
	((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev)))

struct amdgpu_ip_block {
	struct amdgpu_ip_block_status status;
	const struct amdgpu_ip_block_version *version;
};

int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor);

struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type);

int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version);

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Clocks
 */

#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll	ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll	spll;
	struct amdgpu_pll	mpll;
	/* 10 Khz units */
	uint32_t		default_mclk;
	uint32_t		default_sclk;
	uint32_t		default_dispclk;
	uint32_t		current_dispclk;
	uint32_t		dp_extclk;
	uint32_t		max_pixel_clock;
};

/* The sub-allocation manager has to be protected by another lock.
 * By design this is a helper for other parts of the driver, like the
 * indirect buffer or semaphore, which both have their own locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (the first entry has offset == 0, the last entry has the highest
 * offset).
 *
 * When allocating a new object we first check if there is room at
 * the end: total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so, we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub-object until we reach object_offset + object_size >=
 * alloc_size; this object then becomes the sub-object we return.
 *
 * Alignment can't be bigger than page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */
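
/*
 * A minimal usage sketch of the sub-allocator described above.  This is only
 * an illustration: amdgpu_sa_bo_new()/amdgpu_sa_bo_free() are implemented in
 * amdgpu_sa.c and declared elsewhere in the driver, and error handling is
 * omitted here.
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *
 *	if (!amdgpu_sa_bo_new(&sa_manager, &sa_bo, size, 256)) {
 *		// use the range [sa_bo->soffset, sa_bo->eoffset) of the
 *		// backing BO, then release it against a fence so the hole
 *		// can be reclaimed once the GPU is done with it
 *		amdgpu_sa_bo_free(adev, &sa_bo, fence);
 *	}
 */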

#define AMDGPU_SA_NUM_FENCE_LISTS	32

struct amdgpu_sa_manager {
	wait_queue_head_t	wq;
	struct amdgpu_bo	*bo;
	struct list_head	*hole;
	struct list_head	flist[AMDGPU_SA_NUM_FENCE_LISTS];
	struct list_head	olist;
	unsigned		size;
	uint64_t		gpu_addr;
	void			*cpu_ptr;
	uint32_t		domain;
	uint32_t		align;
};

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head		olist;
	struct list_head		flist;
	struct amdgpu_sa_manager	*manager;
	unsigned			soffset;
	unsigned			eoffset;
	struct dma_fence		*fence;
};

int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);

/*
 * IRQS.
 */

struct amdgpu_flip_work {
	struct delayed_work		flip_work;
	struct work_struct		unpin_work;
	struct amdgpu_device		*adev;
	int				crtc_id;
	u32				target_vblank;
	uint64_t			base;
	struct drm_pending_vblank_event	*event;
	struct amdgpu_bo		*old_abo;
	struct dma_fence		*excl;
	unsigned			shared_count;
	struct dma_fence		**shared;
	struct dma_fence_cb		cb;
	bool				async;
};

/*
 * CP & rings.
 */

struct amdgpu_ib {
	struct amdgpu_sa_bo	*sa_bo;
	uint32_t		length_dw;
	uint64_t		gpu_addr;
	uint32_t		*ptr;
	uint32_t		flags;
};

extern const struct drm_sched_backend_ops amdgpu_sched_ops;

/*
 * file private structure
 */

struct amdgpu_fpriv {
	struct amdgpu_vm	vm;
	struct amdgpu_bo_va	*prt_va;
	struct amdgpu_bo_va	*csa_va;
	struct mutex		bo_list_lock;
	struct idr		bo_list_handles;
	struct amdgpu_ctx_mgr	ctx_mgr;
};

int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size,
		  enum amdgpu_ib_pool_type pool,
		  struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
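
/*
 * A hedged sketch of a direct indirect-buffer submission built only from the
 * prototypes above (return-value checks omitted; AMDGPU_IB_POOL_DIRECT is
 * assumed here to be one of the enum amdgpu_ib_pool_type values declared
 * elsewhere in the driver):
 *
 *	struct amdgpu_ib ib;
 *	struct dma_fence *f = NULL;
 *
 *	amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
 *	ib.ptr[0] = ...;			// emit packets
 *	ib.length_dw = ...;			// number of dwords written
 *	amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 *	dma_fence_wait(f, false);
 *	amdgpu_ib_free(adev, &ib, f);
 *	dma_fence_put(f);
 */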

/*
 * CS.
 */
struct amdgpu_cs_chunk {
	uint32_t	chunk_id;
	uint32_t	length_dw;
	void		*kdata;
};

struct amdgpu_cs_post_dep {
	struct drm_syncobj *syncobj;
	struct dma_fence_chain *chain;
	u64 point;
};

struct amdgpu_cs_parser {
	struct amdgpu_device	*adev;
	struct drm_file		*filp;
	struct amdgpu_ctx	*ctx;

	/* chunks */
	unsigned		nchunks;
	struct amdgpu_cs_chunk	*chunks;

	/* scheduler job object */
	struct amdgpu_job	*job;
	struct drm_sched_entity	*entity;

	/* buffer objects */
	struct ww_acquire_ctx		ticket;
	struct amdgpu_bo_list		*bo_list;
	struct amdgpu_mn		*mn;
	struct amdgpu_bo_list_entry	vm_pd;
	struct list_head		validated;
	struct dma_fence		*fence;
	uint64_t			bytes_moved_threshold;
	uint64_t			bytes_moved_vis_threshold;
	uint64_t			bytes_moved;
	uint64_t			bytes_moved_vis;

	/* user fence */
	struct amdgpu_bo_list_entry	uf_entry;

	unsigned			num_post_deps;
	struct amdgpu_cs_post_dep	*post_deps;
};

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
				      uint32_t ib_idx, int idx)
{
	return p->job->ibs[ib_idx].ptr[idx];
}

static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
				       uint32_t ib_idx, int idx,
				       uint32_t value)
{
	p->job->ibs[ib_idx].ptr[idx] = value;
}

/*
 * Writeback
 */
#define AMDGPU_MAX_WB 256	/* Reserve at most 256 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo	*wb_obj;
	volatile uint32_t	*wb;
	uint64_t		gpu_addr;
	u32			num_wb;	/* Number of wb slots actually reserved for amdgpu. */
	unsigned long		used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);

/*
 * Benchmarking
 */
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);

/*
 * Testing
 */
void amdgpu_test_moves(struct amdgpu_device *adev);

/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {
	uint32_t reg_offset;
	bool grbm_indexed;
};

enum amd_reset_method {
	AMD_RESET_METHOD_LEGACY = 0,
	AMD_RESET_METHOD_MODE0,
	AMD_RESET_METHOD_MODE1,
	AMD_RESET_METHOD_MODE2,
	AMD_RESET_METHOD_BACO
};

/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
	bool (*read_disabled_bios)(struct amdgpu_device *adev);
	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
				   u8 *bios, u32 length_bytes);
	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value);
	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
	int (*reset)(struct amdgpu_device *adev);
	enum amd_reset_method (*reset_method)(struct amdgpu_device *adev);
	/* get the reference clock */
	u32 (*get_xclk)(struct amdgpu_device *adev);
	/* MM block clocks */
	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
	/* static power management */
	int (*get_pcie_lanes)(struct amdgpu_device *adev);
	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
	/* get config memsize register */
	u32 (*get_config_memsize)(struct amdgpu_device *adev);
	/* flush hdp write queue */
	void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
	/* invalidate hdp read cache */
	void (*invalidate_hdp)(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring);
	void (*reset_hdp_ras_error_count)(struct amdgpu_device *adev);
	/* check if the asic needs a full reset or if soft reset will work */
	bool (*need_full_reset)(struct amdgpu_device *adev);
	/* initialize doorbell layout for specific asic */
	void (*init_doorbell_index)(struct amdgpu_device *adev);
	/* PCIe bandwidth usage */
	void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0,
			       uint64_t *count1);
	/* do we need to reset the asic at init time (e.g., kexec) */
	bool (*need_reset_on_init)(struct amdgpu_device *adev);
	/* PCIe replay counter */
	uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
	/* device supports BACO */
	bool (*supports_baco)(struct amdgpu_device *adev);
};
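
/*
 * Hedged illustration (hypothetical "foo" ASIC, not a real backend): each
 * ASIC family provides one of these tables during early init and points
 * adev->asic_funcs at it; the amdgpu_asic_*() wrapper macros defined later
 * in this header dispatch through that pointer.
 *
 *	static const struct amdgpu_asic_funcs foo_asic_funcs = {
 *		.read_disabled_bios	= &foo_read_disabled_bios,
 *		.reset			= &foo_asic_reset,
 *		.reset_method		= &foo_asic_reset_method,
 *		.get_xclk		= &foo_get_xclk,
 *		// ...remaining callbacks...
 *	};
 *
 *	adev->asic_funcs = &foo_asic_funcs;
 */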

/*
 * IOCTL.
 */
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_vram_scratch {
	struct amdgpu_bo	*robj;
	volatile uint32_t	*ptr;
	u64			gpu_addr;
};

/*
 * ACPI
 */
struct amdgpu_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
	bool pcie_dev_rdy;
	bool pcie_bus_width;
};

struct amdgpu_atcs {
	struct amdgpu_atcs_functions functions;
};

/*
 * CGS
 */
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);

/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);

typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);

typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);

struct amdgpu_mmio_remap {
	u32 reg_offset;
	resource_size_t bus_addr;
};

/* Define the HW IP blocks that will be used by the driver, add more if necessary */
enum amd_hw_ip_block_type {
	GC_HWIP = 1,
	HDP_HWIP,
	SDMA0_HWIP,
	SDMA1_HWIP,
	SDMA2_HWIP,
	SDMA3_HWIP,
	SDMA4_HWIP,
	SDMA5_HWIP,
	SDMA6_HWIP,
	SDMA7_HWIP,
	MMHUB_HWIP,
	ATHUB_HWIP,
	NBIO_HWIP,
	MP0_HWIP,
	MP1_HWIP,
	UVD_HWIP,
	VCN_HWIP = UVD_HWIP,
	JPEG_HWIP = VCN_HWIP,
	VCE_HWIP,
	DF_HWIP,
	DCE_HWIP,
	OSSSYS_HWIP,
	SMUIO_HWIP,
	PWR_HWIP,
	NBIF_HWIP,
	THM_HWIP,
	CLK_HWIP,
	UMC_HWIP,
	RSMU_HWIP,
	MAX_HWIP
};

#define HWIP_MAX_INSTANCE	8

struct amd_powerplay {
	void *pp_handle;
	const struct amd_pm_funcs *pp_funcs;
};

#define AMDGPU_RESET_MAGIC_NUM	64
#define AMDGPU_MAX_DF_PERFMONS	4
struct amdgpu_device {
	struct device			*dev;
	struct drm_device		*ddev;
	struct pci_dev			*pdev;

#ifdef CONFIG_DRM_AMD_ACP
	struct amdgpu_acp		acp;
#endif

	/* ASIC */
	enum amd_asic_type		asic_type;
	uint32_t			family;
	uint32_t			rev_id;
	uint32_t			external_rev_id;
	unsigned long			flags;
	unsigned long			apu_flags;
	int				usec_timeout;
	const struct amdgpu_asic_funcs	*asic_funcs;
	bool				shutdown;
	bool				need_swiotlb;
	bool				accel_working;
	struct notifier_block		acpi_nb;
	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
	struct amdgpu_debugfs		debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
	unsigned			debugfs_count;
#if defined(CONFIG_DEBUG_FS)
	struct dentry			*debugfs_preempt;
	struct dentry			*debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
#endif
	struct amdgpu_atif		*atif;
	struct amdgpu_atcs		atcs;
	struct mutex			srbm_mutex;
	/* GRBM index mutex. Protects concurrent access to GRBM index */
	struct mutex			grbm_idx_mutex;
	struct dev_pm_domain		vga_pm_domain;
	bool				have_disp_power_ref;
	bool				have_atomics_support;

	/* BIOS */
	bool				is_atom_fw;
	uint8_t				*bios;
	uint32_t			bios_size;
	uint32_t			bios_scratch_reg_offset;
	uint32_t			bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

	/* Register/doorbell mmio */
	resource_size_t			rmmio_base;
	resource_size_t			rmmio_size;
	void __iomem			*rmmio;
	/* protects concurrent MM_INDEX/DATA based register access */
	spinlock_t			mmio_idx_lock;
	struct amdgpu_mmio_remap	rmmio_remap;
	/* protects concurrent SMC based register access */
	spinlock_t			smc_idx_lock;
	amdgpu_rreg_t			smc_rreg;
	amdgpu_wreg_t			smc_wreg;
	/* protects concurrent PCIE register access */
	spinlock_t			pcie_idx_lock;
	amdgpu_rreg_t			pcie_rreg;
	amdgpu_wreg_t			pcie_wreg;
	amdgpu_rreg_t			pciep_rreg;
	amdgpu_wreg_t			pciep_wreg;
	amdgpu_rreg64_t			pcie_rreg64;
	amdgpu_wreg64_t			pcie_wreg64;
	/* protects concurrent UVD register access */
	spinlock_t			uvd_ctx_idx_lock;
	amdgpu_rreg_t			uvd_ctx_rreg;
	amdgpu_wreg_t			uvd_ctx_wreg;
	/* protects concurrent DIDT register access */
	spinlock_t			didt_idx_lock;
	amdgpu_rreg_t			didt_rreg;
	amdgpu_wreg_t			didt_wreg;
	/* protects concurrent gc_cac register access */
	spinlock_t			gc_cac_idx_lock;
	amdgpu_rreg_t			gc_cac_rreg;
	amdgpu_wreg_t			gc_cac_wreg;
	/* protects concurrent se_cac register access */
	spinlock_t			se_cac_idx_lock;
	amdgpu_rreg_t			se_cac_rreg;
	amdgpu_wreg_t			se_cac_wreg;
	/* protects concurrent ENDPOINT (audio) register access */
	spinlock_t			audio_endpt_idx_lock;
	amdgpu_block_rreg_t		audio_endpt_rreg;
	amdgpu_block_wreg_t		audio_endpt_wreg;
	void __iomem			*rio_mem;
	resource_size_t			rio_mem_size;
	struct amdgpu_doorbell		doorbell;

	/* clock/pll info */
	struct amdgpu_clock		clock;

	/* MC */
	struct amdgpu_gmc		gmc;
	struct amdgpu_gart		gart;
	dma_addr_t			dummy_page_addr;
	struct amdgpu_vm_manager	vm_manager;
	struct amdgpu_vmhub		vmhub[AMDGPU_MAX_VMHUBS];
	unsigned			num_vmhubs;

	/* memory management */
	struct amdgpu_mman		mman;
	struct amdgpu_vram_scratch	vram_scratch;
	struct amdgpu_wb		wb;
	atomic64_t			num_bytes_moved;
	atomic64_t			num_evictions;
	atomic64_t			num_vram_cpu_page_faults;
	atomic_t			gpu_reset_counter;
	atomic_t			vram_lost_counter;

	/* data for buffer migration throttling */
	struct {
		spinlock_t		lock;
		s64			last_update_us;
		s64			accum_us; /* accumulated microseconds */
		s64			accum_us_vis; /* for visible VRAM */
		u32			log2_max_MBps;
	} mm_stats;

	/* display */
	bool				enable_virtual_display;
	struct amdgpu_mode_info		mode_info;
	/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
	struct work_struct		hotplug_work;
	struct amdgpu_irq_src		crtc_irq;
	struct amdgpu_irq_src		vupdate_irq;
	struct amdgpu_irq_src		pageflip_irq;
	struct amdgpu_irq_src		hpd_irq;

	/* rings */
	u64				fence_context;
	unsigned			num_rings;
	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
	bool				ib_pool_ready;
	struct amdgpu_sa_manager	ib_pools[AMDGPU_IB_POOL_MAX];
	struct amdgpu_sched		gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];

	/* interrupts */
	struct amdgpu_irq		irq;

	/* powerplay */
	struct amd_powerplay		powerplay;
	bool				pp_force_state_enabled;

	/* smu */
	struct smu_context		smu;

	/* dpm */
	struct amdgpu_pm		pm;
	u32				cg_flags;
	u32				pg_flags;

	/* nbio */
	struct amdgpu_nbio		nbio;

	/* mmhub */
	struct amdgpu_mmhub		mmhub;

	/* gfx */
	struct amdgpu_gfx		gfx;

	/* sdma */
	struct amdgpu_sdma		sdma;

	/* uvd */
	struct amdgpu_uvd		uvd;

	/* vce */
	struct amdgpu_vce		vce;

	/* vcn */
	struct amdgpu_vcn		vcn;

	/* jpeg */
	struct amdgpu_jpeg		jpeg;

	/* firmwares */
	struct amdgpu_firmware		firmware;

	/* PSP */
	struct psp_context		psp;

	/* GDS */
	struct amdgpu_gds		gds;

	/* KFD */
	struct amdgpu_kfd_dev		kfd;

	/* UMC */
	struct amdgpu_umc		umc;

	/* display related functionality */
	struct amdgpu_display_manager	dm;

	/* mes */
	bool				enable_mes;
	struct amdgpu_mes		mes;

	/* df */
	struct amdgpu_df		df;

	struct amdgpu_ip_block		ip_blocks[AMDGPU_MAX_IP_NUM];
	int				num_ip_blocks;
	struct mutex			mn_lock;
	DECLARE_HASHTABLE(mn_hash, 7);

	/* tracking pinned memory */
	atomic64_t			vram_pin_size;
	atomic64_t			visible_pin_size;
	atomic64_t			gart_pin_size;

	/* soc15 register offset based on ip, instance and segment */
	uint32_t			*reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];

	/* delayed work_func for deferring clockgating during resume */
	struct delayed_work		delayed_init_work;

	struct amdgpu_virt		virt;

	/* link all shadow bo */
	struct list_head		shadow_list;
	struct mutex			shadow_list_lock;

	/* record whether a hw reset was performed */
	bool				has_hw_reset;
	u8				reset_magic[AMDGPU_RESET_MAGIC_NUM];

	/* s3/s4 mask */
	bool				in_suspend;
	bool				in_hibernate;

	atomic_t			in_gpu_reset;
	enum pp_mp1_state		mp1_state;
	struct mutex			lock_reset;
	struct amdgpu_doorbell_index	doorbell_index;

	struct mutex			notifier_lock;

	int				asic_reset_res;
	struct work_struct		xgmi_reset_work;

	long				gfx_timeout;
	long				sdma_timeout;
	long				video_timeout;
	long				compute_timeout;

	uint64_t			unique_id;
	uint64_t			df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];

	/* enable runtime pm on the device */
	bool				runpm;
	bool				in_runpm;

	bool				pm_sysfs_en;
	bool				ucode_sysfs_en;

	/* Chip product information */
	char				product_number[16];
	char				product_name[32];
	char				serial[20];

	struct amdgpu_autodump		autodump;

	atomic_t			throttling_logging_enabled;
	struct ratelimit_state		throttling_logging_rs;
};

static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
{
	return container_of(bdev, struct amdgpu_device, mman.bdev);
}

int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       uint32_t *buf, size_t size, bool write);
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags);
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
			     uint32_t acc_flags);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);

bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);

int emu_soc_asic_init(struct amdgpu_device *adev);

/*
 * Registers read & write functions.
 */
#define AMDGPU_REGS_NO_KIQ	(1<<1)

#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)

#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg))
#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v))

#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))

#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
#define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg))
#define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32(reg);			\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32(reg, tmp_);				\
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
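/*
 * Note on the read-modify-write helpers above: in WREG32_P() the bits set in
 * mask are taken from the current register value and every other bit comes
 * from val, i.e. the macro writes (RREG32(reg) & (mask)) | ((val) & ~(mask)).
 * Worked examples:
 *
 *	WREG32_P(reg, 0x5, 0xf0);	// writes (RREG32(reg) & 0xf0) | 0x05
 *	WREG32_AND(reg, 0xf0);		// writes  RREG32(reg) & 0xf0
 *	WREG32_OR(reg, 0x01);		// writes  RREG32(reg) | 0x01
 */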
#define WREG32_PLL_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32_PLL(reg);		\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32_PLL(reg, tmp_);				\
	} while (0)

#define WREG32_SMC_P(_Reg, _Val, _Mask)				\
	do {							\
		u32 tmp = RREG32_SMC(_Reg);			\
		tmp &= (_Mask);					\
		tmp |= ((_Val) & ~(_Mask));			\
		WREG32_SMC(_Reg, tmp);				\
	} while (0)

#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val)			\
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field)				\
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD(reg, field, val)	\
	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD_OFFSET(reg, offset, field, val)	\
	WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
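
/*
 * Hedged example of the field helpers above.  FOO_CNTL and its BAR field are
 * made-up names; real callers rely on the generated register headers
 * providing FOO_CNTL__BAR__SHIFT and FOO_CNTL__BAR_MASK.
 *
 *	u32 tmp = RREG32(mmFOO_CNTL);
 *	tmp = REG_SET_FIELD(tmp, FOO_CNTL, BAR, 1);	// update only BAR
 *	WREG32(mmFOO_CNTL, tmp);
 *
 *	// or, for a single field, the read-modify-write shortcut:
 *	WREG32_FIELD(FOO_CNTL, BAR, 1);
 *
 *	// reading a field back:
 *	u32 bar = REG_GET_FIELD(RREG32(mmFOO_CNTL), FOO_CNTL, BAR);
 */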

/*
 * BIOS helpers.
 */
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))

/*
 * ASICs macro.
 */
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
#define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
#define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
#define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev))

#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));

/* Common functions */
bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job);
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);

void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes);
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
11469c3f2b54SAlex Deucher void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
114797b2e202SAlex Deucher 					     const u32 *registers,
114897b2e202SAlex Deucher 					     const u32 array_size);
114997b2e202SAlex Deucher 
115031af062aSAlex Deucher bool amdgpu_device_supports_boco(struct drm_device *dev);
1151a69cba42SAlex Deucher bool amdgpu_device_supports_baco(struct drm_device *dev);
1152992af942SJonathan Kim bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
1153992af942SJonathan Kim 				      struct amdgpu_device *peer_adev);
1154361dbd01SAlex Deucher int amdgpu_device_baco_enter(struct drm_device *dev);
1155361dbd01SAlex Deucher int amdgpu_device_baco_exit(struct drm_device *dev);
1156992af942SJonathan Kim 
115797b2e202SAlex Deucher /* atpx handler */
115897b2e202SAlex Deucher #if defined(CONFIG_VGA_SWITCHEROO)
115997b2e202SAlex Deucher void amdgpu_register_atpx_handler(void);
116097b2e202SAlex Deucher void amdgpu_unregister_atpx_handler(void);
1161a78fe133SAlex Deucher bool amdgpu_has_atpx_dgpu_power_cntl(void);
11622f5af82eSAlex Deucher bool amdgpu_is_atpx_hybrid(void);
1163efc83cf4SAlex Deucher bool amdgpu_atpx_dgpu_req_power_for_displays(void);
1164714f88e0SAlex Xie bool amdgpu_has_atpx(void);
116597b2e202SAlex Deucher #else
116697b2e202SAlex Deucher static inline void amdgpu_register_atpx_handler(void) {}
116797b2e202SAlex Deucher static inline void amdgpu_unregister_atpx_handler(void) {}
1168a78fe133SAlex Deucher static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
11692f5af82eSAlex Deucher static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
1170efc83cf4SAlex Deucher static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
1171714f88e0SAlex Xie static inline bool amdgpu_has_atpx(void) { return false; }
117297b2e202SAlex Deucher #endif
117397b2e202SAlex Deucher 
117424aeefcdSLyude Paul #if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
117524aeefcdSLyude Paul void *amdgpu_atpx_get_dhandle(void);
117624aeefcdSLyude Paul #else
117724aeefcdSLyude Paul static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
117824aeefcdSLyude Paul #endif
117924aeefcdSLyude Paul 
118097b2e202SAlex Deucher /*
118197b2e202SAlex Deucher  * KMS
118297b2e202SAlex Deucher  */
118397b2e202SAlex Deucher extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
1184f498d9edSNils Wallménius extern const int amdgpu_max_kms_ioctl;
118597b2e202SAlex Deucher 
118697b2e202SAlex Deucher int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
118711b3c20bSGabriel Krisman Bertazi void amdgpu_driver_unload_kms(struct drm_device *dev);
118897b2e202SAlex Deucher void amdgpu_driver_lastclose_kms(struct drm_device *dev);
118997b2e202SAlex Deucher int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
119097b2e202SAlex Deucher void amdgpu_driver_postclose_kms(struct drm_device *dev,
119197b2e202SAlex Deucher 				 struct drm_file *file_priv);
1192cdd61df6SAlex Deucher int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
1193de185019SAlex Deucher int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
1194de185019SAlex Deucher int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
1195e3eff4b5SThomas Zimmermann u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
1196e3eff4b5SThomas Zimmermann int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
1197e3eff4b5SThomas Zimmermann void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
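/*
 * Illustrative sketch only: the suspend and BACO helpers above are
 * normally driven from the driver's runtime-PM callbacks, roughly as
 * below.  The function name is hypothetical and the real wiring (PCI
 * D3 handling, boco vs. baco selection, etc.) lives in amdgpu_drv.c.
 *
 *	static int example_runtime_suspend(struct drm_device *dev)
 *	{
 *		int r = amdgpu_device_suspend(dev, false);
 *
 *		if (r)
 *			return r;
 *		if (amdgpu_device_supports_baco(dev))
 *			r = amdgpu_device_baco_enter(dev);
 *		return r;
 *	}
 */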
119997b2e202SAlex Deucher 			     unsigned long arg);
120097b2e202SAlex Deucher 
120197b2e202SAlex Deucher /*
120297b2e202SAlex Deucher  * functions used by amdgpu_encoder.c
120397b2e202SAlex Deucher  */
120497b2e202SAlex Deucher struct amdgpu_afmt_acr {
120597b2e202SAlex Deucher 	u32 clock;
120697b2e202SAlex Deucher 
120797b2e202SAlex Deucher 	int n_32khz;
120897b2e202SAlex Deucher 	int cts_32khz;
120997b2e202SAlex Deucher 
121097b2e202SAlex Deucher 	int n_44_1khz;
121197b2e202SAlex Deucher 	int cts_44_1khz;
121297b2e202SAlex Deucher 
121397b2e202SAlex Deucher 	int n_48khz;
121497b2e202SAlex Deucher 	int cts_48khz;
121597b2e202SAlex Deucher 
121697b2e202SAlex Deucher };
121797b2e202SAlex Deucher 
121897b2e202SAlex Deucher struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
121997b2e202SAlex Deucher 
122097b2e202SAlex Deucher /* amdgpu_acpi.c */
122197b2e202SAlex Deucher #if defined(CONFIG_ACPI)
122297b2e202SAlex Deucher int amdgpu_acpi_init(struct amdgpu_device *adev);
122397b2e202SAlex Deucher void amdgpu_acpi_fini(struct amdgpu_device *adev);
122497b2e202SAlex Deucher bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
122597b2e202SAlex Deucher int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
122697b2e202SAlex Deucher 					 u8 perf_req, bool advertise);
122797b2e202SAlex Deucher int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
1228206bbafeSDavid Francis 
1229206bbafeSDavid Francis void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
1230206bbafeSDavid Francis 				    struct amdgpu_dm_backlight_caps *caps);
123197b2e202SAlex Deucher #else
123297b2e202SAlex Deucher static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
123397b2e202SAlex Deucher static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
123497b2e202SAlex Deucher #endif
123597b2e202SAlex Deucher 
12369cca0b8eSChristian König int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
12379cca0b8eSChristian König 			   uint64_t addr, struct amdgpu_bo **bo,
12389cca0b8eSChristian König 			   struct amdgpu_bo_va_mapping **mapping);
123997b2e202SAlex Deucher 
12404562236bSHarry Wentland #if defined(CONFIG_DRM_AMD_DC)
12414562236bSHarry Wentland int amdgpu_dm_display_resume(struct amdgpu_device *adev);
12424562236bSHarry Wentland #else
12434562236bSHarry Wentland static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
12444562236bSHarry Wentland #endif
12454562236bSHarry Wentland 
1246fdafb359SEvan Quan 
1247fdafb359SEvan Quan void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
1248fdafb359SEvan Quan void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);
1249fdafb359SEvan Quan 
125097b2e202SAlex Deucher #include "amdgpu_object.h"
1251e4cf4bf5SJonathan Kim 
1252e4cf4bf5SJonathan Kim /* used by df_v3_6.c and amdgpu_pmu.c */
1253e4cf4bf5SJonathan Kim #define AMDGPU_PMU_ATTR(_name, _object) \
1254e4cf4bf5SJonathan Kim static ssize_t \
1255e4cf4bf5SJonathan Kim _name##_show(struct device *dev, \
1256e4cf4bf5SJonathan Kim 	     struct device_attribute *attr, \
1257e4cf4bf5SJonathan Kim 	     char *page) \
1258e4cf4bf5SJonathan Kim { \
1259e4cf4bf5SJonathan Kim 	BUILD_BUG_ON(sizeof(_object) >= PAGE_SIZE - 1); \
1260e4cf4bf5SJonathan Kim 	return sprintf(page, _object "\n"); \
1261e4cf4bf5SJonathan Kim } \
1262e4cf4bf5SJonathan Kim \
1263e4cf4bf5SJonathan Kim static struct device_attribute pmu_attr_##_name = __ATTR_RO(_name)
1264e4cf4bf5SJonathan Kim 
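/*
 * Illustrative sketch only: AMDGPU_PMU_ATTR() above expands to a sysfs
 * show() function plus a read-only device_attribute.  A user such as
 * amdgpu_pmu.c instantiates it roughly like this (the attribute name and
 * format string are placeholders, not the real perf event layout):
 *
 *	AMDGPU_PMU_ATTR(example_event, "config:0-7");
 *
 * which provides example_event_show() and pmu_attr_example_event, ready
 * to be referenced from an attribute group.
 */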
1265c6252390SLuben Tuikov static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
1266c6252390SLuben Tuikov {
1267c6252390SLuben Tuikov 	return adev->gmc.tmz_enabled;
1268c6252390SLuben Tuikov }
1269e4cf4bf5SJonathan Kim 
127053b3f8f4SDennis Li static inline int amdgpu_in_reset(struct amdgpu_device *adev)
127153b3f8f4SDennis Li {
127253b3f8f4SDennis Li 	return atomic_read(&adev->in_gpu_reset);
127353b3f8f4SDennis Li }
1274c6252390SLuben Tuikov #endif
1275