/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "mp/mp_13_0_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "soc21.h"
#include "mxgpu_nv.h"

static const struct amd_ip_funcs soc21_common_ip_funcs;

/* SOC21 */
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] =
{
        {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
        {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};

static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] =
{
        {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
        {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};

static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 =
{
        .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn0),
        .codec_array = vcn_4_0_0_video_codecs_encode_array_vcn0,
};

static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 =
{
        .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn1),
        .codec_array = vcn_4_0_0_video_codecs_encode_array_vcn1,
};

static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] =
{
        {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
        {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
        {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
        {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
        {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

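/*
 * Note: the decode array for the second VCN instance below omits AV1;
 * only instance 0 exposes AV1 decode.
 */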
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] =
{
        {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
        {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
        {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
        {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn0 =
{
        .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn0),
        .codec_array = vcn_4_0_0_video_codecs_decode_array_vcn0,
};

static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 =
{
        .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn1),
        .codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1,
};

static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
                                    const struct amdgpu_video_codecs **codecs)
{
        /* if every VCN instance is harvested, there are no codecs to report */
        if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
                return -EINVAL;

        switch (adev->ip_versions[UVD_HWIP][0]) {
        case IP_VERSION(4, 0, 0):
        case IP_VERSION(4, 0, 2):
                if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
                        if (encode)
                                *codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
                        else
                                *codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
                } else {
                        if (encode)
                                *codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
                        else
                                *codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
                }
                return 0;
        default:
                return -EINVAL;
        }
}

/*
 * Indirect register accessors
 */
static u32 soc21_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long address, data;

        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);

        return amdgpu_device_indirect_rreg(adev, address, data, reg);
}

static void soc21_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long address, data;

        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);

        amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}

static u64 soc21_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
        unsigned long address, data;

        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);

        return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}

static void soc21_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
        unsigned long address, data;

        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);

        amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}

static u32 soc21_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags, address, data;
        u32 r;

        address = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_INDEX);
        data = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_DATA);

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(address, reg);
        r = RREG32(data);
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
        return r;
}

static void soc21_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags, address, data;

        address = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_INDEX);
        data = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_DATA);

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(address, reg);
        WREG32(data, v);
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

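/*
 * For reference, a minimal open-coded sketch of the index/data pattern that
 * the PCIE accessors above delegate to amdgpu_device_indirect_rreg()/_wreg():
 * program the index register, then access the value through the data
 * register, serialized by a lock (the DIDT accessors above do this
 * explicitly). The function below is illustrative only, hence guarded out;
 * its name is hypothetical and it assumes adev->pcie_idx_lock guards the
 * index/data pair.
 */
#if 0
static u32 soc21_pcie_rreg_open_coded(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags, address, data;
        u32 r;

        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, reg);   /* select the indirect register */
        r = RREG32(data);       /* read its value through the data port */
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}
#endif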
static u32 soc21_get_config_memsize(struct amdgpu_device *adev)
{
        return adev->nbio.funcs->get_memsize(adev);
}

static u32 soc21_get_xclk(struct amdgpu_device *adev)
{
        return adev->clock.spll.reference_freq;
}

/* Program GRBM_GFX_CNTL to steer register accesses to the given ME/pipe/queue/VMID */
void soc21_grbm_select(struct amdgpu_device *adev,
                       u32 me, u32 pipe, u32 queue, u32 vmid)
{
        u32 grbm_gfx_cntl = 0;

        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

        WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, grbm_gfx_cntl);
}

static void soc21_vga_set_state(struct amdgpu_device *adev, bool state)
{
        /* todo */
}

static bool soc21_read_disabled_bios(struct amdgpu_device *adev)
{
        /* todo */
        return false;
}

/* Registers that userspace may read through the read_register callback */
static struct soc15_allowed_register_entry soc21_allowed_read_registers[] = {
        { SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS2)},
        { SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE0)},
        { SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE1)},
        { SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE2)},
        { SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE3)},
        { SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_STATUS_REG)},
        { SOC15_REG_ENTRY(SDMA1, 0, regSDMA1_STATUS_REG)},
        { SOC15_REG_ENTRY(GC, 0, regCP_STAT)},
        { SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT1)},
        { SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT2)},
        { SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT3)},
        { SOC15_REG_ENTRY(GC, 0, regCP_CPF_BUSY_STAT)},
        { SOC15_REG_ENTRY(GC, 0, regCP_CPF_STALLED_STAT1)},
        { SOC15_REG_ENTRY(GC, 0, regCP_CPF_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, regCP_CPC_BUSY_STAT)},
        { SOC15_REG_ENTRY(GC, 0, regCP_CPC_STALLED_STAT1)},
        { SOC15_REG_ENTRY(GC, 0, regCP_CPC_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, regGB_ADDR_CONFIG)},
};

static uint32_t soc21_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
                                            u32 sh_num, u32 reg_offset)
{
        uint32_t val;

        mutex_lock(&adev->grbm_idx_mutex);
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

        val = RREG32(reg_offset);

        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
        return val;
}

static uint32_t soc21_get_register_value(struct amdgpu_device *adev,
                                         bool indexed, u32 se_num,
                                         u32 sh_num, u32 reg_offset)
{
        if (indexed) {
                return soc21_read_indexed_register(adev, se_num, sh_num, reg_offset);
        } else {
                if (reg_offset == SOC15_REG_OFFSET(GC, 0, regGB_ADDR_CONFIG) &&
                    adev->gfx.config.gb_addr_config)
                        return adev->gfx.config.gb_addr_config;
                return RREG32(reg_offset);
        }
}

static int soc21_read_register(struct amdgpu_device *adev, u32 se_num,
                               u32 sh_num, u32 reg_offset, u32 *value)
{
        uint32_t i;
        struct soc15_allowed_register_entry *en;

        *value = 0;
        for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) {
                en = &soc21_allowed_read_registers[i];
                if (adev->reg_offset[en->hwip][en->inst] &&
                    reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
                                   + en->reg_offset))
                        continue;

                *value = soc21_get_register_value(adev,
                                soc21_allowed_read_registers[i].grbm_indexed,
                                se_num, sh_num, reg_offset);
                return 0;
        }
        return -EINVAL;
}

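/*
 * Example usage (sketch only, hence guarded out): reading GRBM_STATUS
 * through the allowlist helper, as the AMDGPU_INFO ioctl path does.
 * 0xffffffff selects the broadcast SE/SH; the function name below is
 * hypothetical.
 */
#if 0
static void soc21_read_register_example(struct amdgpu_device *adev)
{
        u32 val;

        if (!soc21_read_register(adev, 0xffffffff, 0xffffffff,
                                 SOC15_REG_OFFSET(GC, 0, regGRBM_STATUS),
                                 &val))
                dev_info(adev->dev, "GRBM_STATUS = 0x%08x\n", val);
}
#endif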
#if 0
static int soc21_asic_mode1_reset(struct amdgpu_device *adev)
{
        u32 i;
        int ret = 0;

        amdgpu_atombios_scratch_regs_engine_hung(adev, true);

        /* disable BM */
        pci_clear_master(adev->pdev);

        amdgpu_device_cache_pci_state(adev->pdev);

        if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
                dev_info(adev->dev, "GPU smu mode1 reset\n");
                ret = amdgpu_dpm_mode1_reset(adev);
        } else {
                dev_info(adev->dev, "GPU psp mode1 reset\n");
                ret = psp_gpu_reset(adev);
        }

        if (ret)
                dev_err(adev->dev, "GPU mode1 reset failed\n");
        amdgpu_device_load_pci_state(adev->pdev);

        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
                u32 memsize = adev->nbio.funcs->get_memsize(adev);

                if (memsize != 0xffffffff)
                        break;
                udelay(1);
        }

        amdgpu_atombios_scratch_regs_engine_hung(adev, false);

        return ret;
}
#endif

static enum amd_reset_method
soc21_asic_reset_method(struct amdgpu_device *adev)
{
        if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
            amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
            amdgpu_reset_method == AMD_RESET_METHOD_BACO)
                return amdgpu_reset_method;

        if (amdgpu_reset_method != -1)
                dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
                         amdgpu_reset_method);

        switch (adev->ip_versions[MP1_HWIP][0]) {
        case IP_VERSION(13, 0, 0):
        case IP_VERSION(13, 0, 7):
        case IP_VERSION(13, 0, 10):
                return AMD_RESET_METHOD_MODE1;
        case IP_VERSION(13, 0, 4):
        case IP_VERSION(13, 0, 11):
                return AMD_RESET_METHOD_MODE2;
        default:
                if (amdgpu_dpm_is_baco_supported(adev))
                        return AMD_RESET_METHOD_BACO;
                else
                        return AMD_RESET_METHOD_MODE1;
        }
}

static int soc21_asic_reset(struct amdgpu_device *adev)
{
        int ret = 0;

        switch (soc21_asic_reset_method(adev)) {
        case AMD_RESET_METHOD_PCI:
                dev_info(adev->dev, "PCI reset\n");
                ret = amdgpu_device_pci_reset(adev);
                break;
        case AMD_RESET_METHOD_BACO:
                dev_info(adev->dev, "BACO reset\n");
                ret = amdgpu_dpm_baco_reset(adev);
                break;
        case AMD_RESET_METHOD_MODE2:
                dev_info(adev->dev, "MODE2 reset\n");
                ret = amdgpu_dpm_mode2_reset(adev);
                break;
        default:
                dev_info(adev->dev, "MODE1 reset\n");
                ret = amdgpu_device_mode1_reset(adev);
                break;
        }

        return ret;
}

static int soc21_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
        /* todo */
        return 0;
}

static int soc21_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
        /* todo */
        return 0;
}

static void soc21_pcie_gen3_enable(struct amdgpu_device *adev)
{
        if (pci_is_root_bus(adev->pdev->bus))
                return;

        if (amdgpu_pcie_gen2 == 0)
                return;

        if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
                                        CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
                return;

        /* todo */
}

static void soc21_program_aspm(struct amdgpu_device *adev)
{
        if (!amdgpu_device_should_use_aspm(adev))
                return;

        if (!(adev->flags & AMD_IS_APU) &&
            (adev->nbio.funcs->program_aspm))
                adev->nbio.funcs->program_aspm(adev);
}

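/*
 * Toggle both the main doorbell aperture and the doorbell self-ring
 * aperture through the NBIO block.
 */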
static void soc21_enable_doorbell_aperture(struct amdgpu_device *adev,
                                           bool enable)
{
        adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
        adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

const struct amdgpu_ip_block_version soc21_common_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_COMMON,
        .major = 1,
        .minor = 0,
        .rev = 0,
        .funcs = &soc21_common_ip_funcs,
};

static uint32_t soc21_get_rev_id(struct amdgpu_device *adev)
{
        return adev->nbio.funcs->get_rev_id(adev);
}

static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(11, 0, 0):
                return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
        case IP_VERSION(11, 0, 2):
        case IP_VERSION(11, 0, 3):
                return false;
        default:
                return true;
        }
}

static bool soc21_need_reset_on_init(struct amdgpu_device *adev)
{
        u32 sol_reg;

        if (adev->flags & AMD_IS_APU)
                return false;

        /* Check the sOS sign-of-life register to confirm that the sys
         * driver and sOS have already been loaded.
         */
        sol_reg = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
        if (sol_reg)
                return true;

        return false;
}

static uint64_t soc21_get_pcie_replay_count(struct amdgpu_device *adev)
{
        /* TODO: dummy implementation for the pcie_replay_count sysfs interface */
        return 0;
}

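/*
 * SOC21 reuses the Navi10 doorbell assignment layout, hence the
 * AMDGPU_NAVI10_* indices below.
 */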
static void soc21_init_doorbell_index(struct amdgpu_device *adev)
{
        adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
        adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
        adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
        adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
        adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
        adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
        adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
        adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
        adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
        adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
        adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
        adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
        adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
        adev->doorbell_index.gfx_userqueue_start =
                AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_START;
        adev->doorbell_index.gfx_userqueue_end =
                AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_END;
        adev->doorbell_index.mes_ring0 = AMDGPU_NAVI10_DOORBELL_MES_RING0;
        adev->doorbell_index.mes_ring1 = AMDGPU_NAVI10_DOORBELL_MES_RING1;
        adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
        adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
        adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
        adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
        adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
        adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
        adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
        adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
        adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

        adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
        adev->doorbell_index.sdma_doorbell_range = 20;
}

static void soc21_pre_asic_init(struct amdgpu_device *adev)
{
        /* nothing to do yet */
}

static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev,
                                          bool enter)
{
        if (enter)
                amdgpu_gfx_rlc_enter_safe_mode(adev);
        else
                amdgpu_gfx_rlc_exit_safe_mode(adev);

        if (adev->gfx.funcs->update_perfmon_mgcg)
                adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);

        return 0;
}

static const struct amdgpu_asic_funcs soc21_asic_funcs =
{
        .read_disabled_bios = &soc21_read_disabled_bios,
        .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
        .read_register = &soc21_read_register,
        .reset = &soc21_asic_reset,
        .reset_method = &soc21_asic_reset_method,
        .set_vga_state = &soc21_vga_set_state,
        .get_xclk = &soc21_get_xclk,
        .set_uvd_clocks = &soc21_set_uvd_clocks,
        .set_vce_clocks = &soc21_set_vce_clocks,
        .get_config_memsize = &soc21_get_config_memsize,
        .init_doorbell_index = &soc21_init_doorbell_index,
        .need_full_reset = &soc21_need_full_reset,
        .need_reset_on_init = &soc21_need_reset_on_init,
        .get_pcie_replay_count = &soc21_get_pcie_replay_count,
        .supports_baco = &amdgpu_dpm_is_baco_supported,
        .pre_asic_init = &soc21_pre_asic_init,
        .query_video_codecs = &soc21_query_video_codecs,
        .update_umd_stable_pstate = &soc21_update_umd_stable_pstate,
};

static int soc21_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
        adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
        adev->smc_rreg = NULL;
        adev->smc_wreg = NULL;
        adev->pcie_rreg = &soc21_pcie_rreg;
        adev->pcie_wreg = &soc21_pcie_wreg;
        adev->pcie_rreg64 = &soc21_pcie_rreg64;
        adev->pcie_wreg64 = &soc21_pcie_wreg64;
        adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
        adev->pciep_wreg = amdgpu_device_pcie_port_wreg;

        /* TODO: will add them during VCN v2 implementation */
        adev->uvd_ctx_rreg = NULL;
        adev->uvd_ctx_wreg = NULL;

        adev->didt_rreg = &soc21_didt_rreg;
        adev->didt_wreg = &soc21_didt_wreg;

        adev->asic_funcs = &soc21_asic_funcs;

        adev->rev_id = soc21_get_rev_id(adev);
        adev->external_rev_id = 0xff;
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(11, 0, 0):
                adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_GFX_CGLS |
#if 0
                        AMD_CG_SUPPORT_GFX_3D_CGCG |
                        AMD_CG_SUPPORT_GFX_3D_CGLS |
#endif
                        AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_REPEATER_FGCG |
                        AMD_CG_SUPPORT_GFX_FGCG |
                        AMD_CG_SUPPORT_GFX_PERF_CLK |
                        AMD_CG_SUPPORT_VCN_MGCG |
                        AMD_CG_SUPPORT_JPEG_MGCG |
                        AMD_CG_SUPPORT_ATHUB_MGCG |
                        AMD_CG_SUPPORT_ATHUB_LS |
                        AMD_CG_SUPPORT_MC_MGCG |
                        AMD_CG_SUPPORT_MC_LS |
                        AMD_CG_SUPPORT_IH_CG |
                        AMD_CG_SUPPORT_HDP_SD;
                adev->pg_flags = AMD_PG_SUPPORT_VCN |
                        AMD_PG_SUPPORT_VCN_DPG |
                        AMD_PG_SUPPORT_JPEG |
                        AMD_PG_SUPPORT_ATHUB |
                        AMD_PG_SUPPORT_MMHUB;
                adev->external_rev_id = adev->rev_id + 0x1; /* TODO: need update */
                break;
        case IP_VERSION(11, 0, 2):
                adev->cg_flags =
                        AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_GFX_CGLS |
                        AMD_CG_SUPPORT_REPEATER_FGCG |
                        AMD_CG_SUPPORT_VCN_MGCG |
                        AMD_CG_SUPPORT_JPEG_MGCG |
                        AMD_CG_SUPPORT_ATHUB_MGCG |
                        AMD_CG_SUPPORT_ATHUB_LS |
                        AMD_CG_SUPPORT_IH_CG |
                        AMD_CG_SUPPORT_HDP_SD;
                adev->pg_flags =
                        AMD_PG_SUPPORT_VCN |
                        AMD_PG_SUPPORT_VCN_DPG |
                        AMD_PG_SUPPORT_JPEG |
                        AMD_PG_SUPPORT_ATHUB |
                        AMD_PG_SUPPORT_MMHUB;
                adev->external_rev_id = adev->rev_id + 0x10;
                break;
        case IP_VERSION(11, 0, 1):
                adev->cg_flags =
                        AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_GFX_CGLS |
                        AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_FGCG |
                        AMD_CG_SUPPORT_REPEATER_FGCG |
                        AMD_CG_SUPPORT_GFX_PERF_CLK |
                        AMD_CG_SUPPORT_MC_MGCG |
                        AMD_CG_SUPPORT_MC_LS |
                        AMD_CG_SUPPORT_HDP_MGCG |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_ATHUB_MGCG |
                        AMD_CG_SUPPORT_ATHUB_LS |
                        AMD_CG_SUPPORT_IH_CG |
                        AMD_CG_SUPPORT_BIF_MGCG |
                        AMD_CG_SUPPORT_BIF_LS |
                        AMD_CG_SUPPORT_VCN_MGCG |
                        AMD_CG_SUPPORT_JPEG_MGCG;
                adev->pg_flags =
                        AMD_PG_SUPPORT_GFX_PG |
                        AMD_PG_SUPPORT_VCN |
                        AMD_PG_SUPPORT_VCN_DPG |
                        AMD_PG_SUPPORT_JPEG;
                adev->external_rev_id = adev->rev_id + 0x1;
                break;
        case IP_VERSION(11, 0, 3):
                adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG |
                        AMD_CG_SUPPORT_JPEG_MGCG |
                        AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_GFX_CGLS |
                        AMD_CG_SUPPORT_REPEATER_FGCG |
                        AMD_CG_SUPPORT_GFX_MGCG;
                adev->pg_flags = AMD_PG_SUPPORT_VCN |
                        AMD_PG_SUPPORT_VCN_DPG |
                        AMD_PG_SUPPORT_JPEG;
                adev->external_rev_id = adev->rev_id + 0x20;
                break;
        case IP_VERSION(11, 0, 4):
                adev->cg_flags =
                        AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_GFX_CGLS |
                        AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_FGCG |
                        AMD_CG_SUPPORT_REPEATER_FGCG |
                        AMD_CG_SUPPORT_GFX_PERF_CLK |
                        AMD_CG_SUPPORT_MC_MGCG |
                        AMD_CG_SUPPORT_MC_LS |
                        AMD_CG_SUPPORT_HDP_MGCG |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_ATHUB_MGCG |
                        AMD_CG_SUPPORT_ATHUB_LS |
                        AMD_CG_SUPPORT_IH_CG |
                        AMD_CG_SUPPORT_BIF_MGCG |
                        AMD_CG_SUPPORT_BIF_LS |
                        AMD_CG_SUPPORT_VCN_MGCG |
                        AMD_CG_SUPPORT_JPEG_MGCG;
                adev->pg_flags = AMD_PG_SUPPORT_VCN |
                        AMD_PG_SUPPORT_VCN_DPG |
                        AMD_PG_SUPPORT_GFX_PG |
                        AMD_PG_SUPPORT_JPEG;
                adev->external_rev_id = adev->rev_id + 0x1;
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        if (amdgpu_sriov_vf(adev)) {
                amdgpu_virt_init_setting(adev);
                xgpu_nv_mailbox_set_irq_funcs(adev);
        }

        return 0;
}

static int soc21_common_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev))
                xgpu_nv_mailbox_get_irq(adev);

        return 0;
}

static int soc21_common_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev))
                xgpu_nv_mailbox_add_irq_id(adev);

        return 0;
}

static int soc21_common_sw_fini(void *handle)
{
        return 0;
}

static int soc21_common_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* enable pcie gen2/3 link */
        soc21_pcie_gen3_enable(adev);
        /* enable aspm */
        soc21_program_aspm(adev);
        /* setup nbio registers */
        adev->nbio.funcs->init_registers(adev);
        /* remap HDP registers to a hole in mmio space,
         * so that those registers can be exposed to process space
         */
        if (adev->nbio.funcs->remap_hdp_registers)
                adev->nbio.funcs->remap_hdp_registers(adev);
        /* enable the doorbell aperture */
        soc21_enable_doorbell_aperture(adev, true);

        return 0;
}

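/*
 * Reverse of soc21_common_hw_init(): close the doorbell aperture and, under
 * SR-IOV, release the mailbox interrupt.
 */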
static int soc21_common_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* disable the doorbell aperture */
        soc21_enable_doorbell_aperture(adev, false);

        if (amdgpu_sriov_vf(adev))
                xgpu_nv_mailbox_put_irq(adev);

        return 0;
}

static int soc21_common_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return soc21_common_hw_fini(adev);
}

static int soc21_common_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return soc21_common_hw_init(adev);
}

static bool soc21_common_is_idle(void *handle)
{
        return true;
}

static int soc21_common_wait_for_idle(void *handle)
{
        return 0;
}

static int soc21_common_soft_reset(void *handle)
{
        return 0;
}

static int soc21_common_set_clockgating_state(void *handle,
                                              enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        switch (adev->ip_versions[NBIO_HWIP][0]) {
        case IP_VERSION(4, 3, 0):
        case IP_VERSION(4, 3, 1):
        case IP_VERSION(7, 7, 0):
                adev->nbio.funcs->update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE);
                adev->nbio.funcs->update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE);
                adev->hdp.funcs->update_clock_gating(adev,
                                state == AMD_CG_STATE_GATE);
                break;
        default:
                break;
        }
        return 0;
}

static int soc21_common_set_powergating_state(void *handle,
                                              enum amd_powergating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        switch (adev->ip_versions[LSDMA_HWIP][0]) {
        case IP_VERSION(6, 0, 0):
        case IP_VERSION(6, 0, 2):
                adev->lsdma.funcs->update_memory_power_gating(adev,
                                state == AMD_PG_STATE_GATE);
                break;
        default:
                break;
        }

        return 0;
}

static void soc21_common_get_clockgating_state(void *handle, u64 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->nbio.funcs->get_clockgating_state(adev, flags);

        adev->hdp.funcs->get_clock_gating_state(adev, flags);
}

static const struct amd_ip_funcs soc21_common_ip_funcs = {
        .name = "soc21_common",
        .early_init = soc21_common_early_init,
        .late_init = soc21_common_late_init,
        .sw_init = soc21_common_sw_init,
        .sw_fini = soc21_common_sw_fini,
        .hw_init = soc21_common_hw_init,
        .hw_fini = soc21_common_hw_fini,
        .suspend = soc21_common_suspend,
        .resume = soc21_common_resume,
        .is_idle = soc21_common_is_idle,
        .wait_for_idle = soc21_common_wait_for_idle,
        .soft_reset = soc21_common_soft_reset,
        .set_clockgating_state = soc21_common_set_clockgating_state,
        .set_powergating_state = soc21_common_set_powergating_state,
        .get_clockgating_state = soc21_common_get_clockgating_state,
};