/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "mp/mp_13_0_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "soc21.h"

static const struct amd_ip_funcs soc21_common_ip_funcs;

/* SOC21 */
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};

static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array),
	.codec_array = vcn_4_0_0_video_codecs_encode_array,
};

static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[] =
{
	/* max_height read "4906", a transposed typo for 4096 */
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array),
	.codec_array = vcn_4_0_0_video_codecs_decode_array,
};

static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
				    const struct amdgpu_video_codecs **codecs)
{
	switch (adev->ip_versions[UVD_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 2):
		if (encode)
			*codecs = &vcn_4_0_0_video_codecs_encode;
		else
			*codecs = &vcn_4_0_0_video_codecs_decode;
		return 0;
	default:
		return -EINVAL;
	}
}
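/*
 * Illustrative sketch, not part of the driver: one way a caller could
 * walk the table returned by soc21_query_video_codecs() through the
 * generic asic hook.  Field names follow the codec_info_build() macro
 * in amdgpu.h; the helper below is hypothetical.
 */
#if 0
static void soc21_dump_decode_caps(struct amdgpu_device *adev)
{
	const struct amdgpu_video_codecs *codecs;
	u32 i;

	if (amdgpu_asic_query_video_codecs(adev, false, &codecs))
		return;

	for (i = 0; i < codecs->codec_count; i++)
		dev_info(adev->dev, "decode codec %u: max %ux%u, level %u\n",
			 codecs->codec_array[i].codec_type,
			 codecs->codec_array[i].max_width,
			 codecs->codec_array[i].max_height,
			 codecs->codec_array[i].max_level);
}
#endif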
/*
 * Indirect register accessors
 */
static u32 soc21_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg(adev, address, data, reg);
}

static void soc21_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}

static u64 soc21_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}

static void soc21_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}

static u32 soc21_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc21_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
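/*
 * Illustrative sketch, not part of the driver: both the PCIE and DIDT
 * accessors above use the same index/data protocol.  Roughly, and
 * ignoring the locking done inside amdgpu_device_indirect_rreg():
 */
#if 0
static u32 indirect_rreg_sketch(struct amdgpu_device *adev,
				u32 index_offset, u32 data_offset, u32 reg)
{
	u32 r;

	writel(reg, adev->rmmio + index_offset * 4);	/* select register */
	readl(adev->rmmio + index_offset * 4);		/* post the write */
	r = readl(adev->rmmio + data_offset * 4);	/* fetch its value */
	return r;
}
#endif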
static u32 soc21_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 soc21_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

void soc21_grbm_select(struct amdgpu_device *adev,
		       u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void soc21_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc21_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static struct soc15_allowed_register_entry soc21_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, regSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, regGB_ADDR_CONFIG)},
};

static uint32_t soc21_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc21_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc21_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, regGB_ADDR_CONFIG) &&
		    adev->gfx.config.gb_addr_config)
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int soc21_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) {
		en = &soc21_allowed_read_registers[i];
		/* Skip entries whose IP block has no register map on this
		 * ASIC.  The old combined test let a NULL map fall through
		 * and match any offset.
		 */
		if (!adev->reg_offset[en->hwip][en->inst])
			continue;
		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
					+ en->reg_offset))
			continue;

		*value = soc21_get_register_value(adev,
						  soc21_allowed_read_registers[i].grbm_indexed,
						  se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}
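/*
 * Illustrative sketch, not part of the driver: how the allow-list read
 * path above is typically exercised.  0xffffffff for se_num/sh_num
 * means "no GRBM banking"; offsets outside the table return -EINVAL.
 */
#if 0
	u32 val;

	if (!soc21_read_register(adev, 0xffffffff, 0xffffffff,
				 SOC15_REG_OFFSET(GC, 0, regGRBM_STATUS), &val))
		dev_info(adev->dev, "GRBM_STATUS: 0x%08x\n", val);
#endif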
#if 0
static int soc21_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
		dev_info(adev->dev, "GPU smu mode1 reset\n");
		ret = amdgpu_dpm_mode1_reset(adev);
	} else {
		dev_info(adev->dev, "GPU psp mode1 reset\n");
		ret = psp_gpu_reset(adev);
	}

	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");
	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
#endif

static enum amd_reset_method
soc21_asic_reset_method(struct amdgpu_device *adev)
{
	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 7):
		return AMD_RESET_METHOD_MODE1;
	case IP_VERSION(13, 0, 4):
		return AMD_RESET_METHOD_MODE2;
	default:
		if (amdgpu_dpm_is_baco_supported(adev))
			return AMD_RESET_METHOD_BACO;
		else
			return AMD_RESET_METHOD_MODE1;
	}
}

static int soc21_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (soc21_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_PCI:
		dev_info(adev->dev, "PCI reset\n");
		ret = amdgpu_device_pci_reset(adev);
		break;
	case AMD_RESET_METHOD_BACO:
		dev_info(adev->dev, "BACO reset\n");
		ret = amdgpu_dpm_baco_reset(adev);
		break;
	case AMD_RESET_METHOD_MODE2:
		dev_info(adev->dev, "MODE2 reset\n");
		ret = amdgpu_dpm_mode2_reset(adev);
		break;
	default:
		dev_info(adev->dev, "MODE1 reset\n");
		ret = amdgpu_device_mode1_reset(adev);
		break;
	}

	return ret;
}
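/*
 * Illustrative sketch, not part of the driver: the core reaches
 * soc21_asic_reset() through the asic_funcs vtable during GPU
 * recovery, so the method resolved above is what actually runs:
 */
#if 0
	if (amdgpu_asic_reset(adev))
		dev_err(adev->dev, "asic reset failed\n");
#endif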
static int soc21_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int soc21_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void soc21_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void soc21_program_aspm(struct amdgpu_device *adev)
{
	if (!amdgpu_device_should_use_aspm(adev))
		return;

	if (!(adev->flags & AMD_IS_APU) &&
	    (adev->nbio.funcs->program_aspm))
		adev->nbio.funcs->program_aspm(adev);
}

static void soc21_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

const struct amdgpu_ip_block_version soc21_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &soc21_common_ip_funcs,
};

static uint32_t soc21_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
		return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
	case IP_VERSION(11, 0, 2):
		return false;
	default:
		return true;
	}
}

static bool soc21_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the sys
	 * driver and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t soc21_get_pcie_replay_count(struct amdgpu_device *adev)
{
	/* TODO: dummy implementation for the pcie_replay_count sysfs
	 * interface
	 */
	return 0;
}

static void soc21_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.gfx_userqueue_start =
		AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_START;
	adev->doorbell_index.gfx_userqueue_end =
		AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_END;
	adev->doorbell_index.mes_ring0 = AMDGPU_NAVI10_DOORBELL_MES_RING0;
	adev->doorbell_index.mes_ring1 = AMDGPU_NAVI10_DOORBELL_MES_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static void soc21_pre_asic_init(struct amdgpu_device *adev)
{
}

static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev,
					  bool enter)
{
	if (enter)
		amdgpu_gfx_rlc_enter_safe_mode(adev);
	else
		amdgpu_gfx_rlc_exit_safe_mode(adev);

	if (adev->gfx.funcs->update_perfmon_mgcg)
		adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);

	return 0;
}
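/*
 * Illustrative sketch, not part of the driver: update_umd_stable_pstate
 * brackets userspace profiling sessions.  Entering RLC safe mode first
 * keeps the RLC from power-gating GFX while perfmon clock gating is
 * turned off for stable counter readings:
 */
#if 0
	soc21_update_umd_stable_pstate(adev, true);	/* enter: safe mode, perfmon MGCG off */
	/* ... sample performance counters ... */
	soc21_update_umd_stable_pstate(adev, false);	/* exit: restore gating */
#endif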
static const struct amdgpu_asic_funcs soc21_asic_funcs =
{
	.read_disabled_bios = &soc21_read_disabled_bios,
	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
	.read_register = &soc21_read_register,
	.reset = &soc21_asic_reset,
	.reset_method = &soc21_asic_reset_method,
	.set_vga_state = &soc21_vga_set_state,
	.get_xclk = &soc21_get_xclk,
	.set_uvd_clocks = &soc21_set_uvd_clocks,
	.set_vce_clocks = &soc21_set_vce_clocks,
	.get_config_memsize = &soc21_get_config_memsize,
	.init_doorbell_index = &soc21_init_doorbell_index,
	.need_full_reset = &soc21_need_full_reset,
	.need_reset_on_init = &soc21_need_reset_on_init,
	.get_pcie_replay_count = &soc21_get_pcie_replay_count,
	.supports_baco = &amdgpu_dpm_is_baco_supported,
	.pre_asic_init = &soc21_pre_asic_init,
	.query_video_codecs = &soc21_query_video_codecs,
	.update_umd_stable_pstate = &soc21_update_umd_stable_pstate,
};

static int soc21_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc21_pcie_rreg;
	adev->pcie_wreg = &soc21_pcie_wreg;
	adev->pcie_rreg64 = &soc21_pcie_rreg64;
	adev->pcie_wreg64 = &soc21_pcie_wreg64;
	adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
	adev->pciep_wreg = amdgpu_device_pcie_port_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &soc21_didt_rreg;
	adev->didt_wreg = &soc21_didt_wreg;

	adev->asic_funcs = &soc21_asic_funcs;

	adev->rev_id = soc21_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
		adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
#if 0
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
#endif
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_GFX_PERF_CLK |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_SD;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x1; // TODO: need update
		break;
	case IP_VERSION(11, 0, 2):
		adev->cg_flags =
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_SD;
		adev->pg_flags =
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x10;
		break;
	case IP_VERSION(11, 0, 1):
		adev->cg_flags =
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_GFX_PERF_CLK |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags =
			AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}
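/*
 * Illustrative sketch, not part of the driver: the cg_flags/pg_flags
 * masks chosen above are only capability advertisements; per-IP code
 * checks them before programming any gating, along these lines:
 */
#if 0
	if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
		/* program GFX coarse-grain clock gating */;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		/* allow VCN dynamic power gating */;
#endif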
static int soc21_common_late_init(void *handle)
{
	return 0;
}

static int soc21_common_sw_init(void *handle)
{
	return 0;
}

static int soc21_common_sw_fini(void *handle)
{
	return 0;
}

static int soc21_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc21_pcie_gen3_enable(adev);
	/* enable aspm */
	soc21_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * in order to expose those registers to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	soc21_enable_doorbell_aperture(adev, true);

	return 0;
}

static int soc21_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc21_enable_doorbell_aperture(adev, false);

	return 0;
}

static int soc21_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc21_common_hw_fini(adev);
}

static int soc21_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc21_common_hw_init(adev);
}

static bool soc21_common_is_idle(void *handle)
{
	return true;
}

static int soc21_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc21_common_soft_reset(void *handle)
{
	return 0;
}

static int soc21_common_set_clockgating_state(void *handle,
					      enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
	case IP_VERSION(7, 7, 0):
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int soc21_common_set_powergating_state(void *handle,
					      enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->ip_versions[LSDMA_HWIP][0]) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 2):
		adev->lsdma.funcs->update_memory_power_gating(adev,
				state == AMD_PG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

static void soc21_common_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	adev->hdp.funcs->get_clock_gating_state(adev, flags);
}
static const struct amd_ip_funcs soc21_common_ip_funcs = {
	.name = "soc21_common",
	.early_init = soc21_common_early_init,
	.late_init = soc21_common_late_init,
	.sw_init = soc21_common_sw_init,
	.sw_fini = soc21_common_sw_fini,
	.hw_init = soc21_common_hw_init,
	.hw_fini = soc21_common_hw_fini,
	.suspend = soc21_common_suspend,
	.resume = soc21_common_resume,
	.is_idle = soc21_common_is_idle,
	.wait_for_idle = soc21_common_wait_for_idle,
	.soft_reset = soc21_common_soft_reset,
	.set_clockgating_state = soc21_common_set_clockgating_state,
	.set_powergating_state = soc21_common_set_powergating_state,
	.get_clockgating_state = soc21_common_get_clockgating_state,
};
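/*
 * Illustrative sketch, not part of this file: soc21_common_ip_block is
 * registered by the IP-discovery code (amdgpu_discovery.c), roughly:
 */
#if 0
	r = amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
	if (r)
		return r;
#endif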