/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v2_3.h"

#include "nbio/nbio_2_3_default.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
#include <linux/pci.h>

#define smnPCIE_CONFIG_CNTL	0x11180044
#define smnCPM_CONTROL		0x11180460
#define smnPCIE_CNTL2		0x11180070
#define smnPCIE_LC_CNTL		0x11140280

#define mmBIF_SDMA2_DOORBELL_RANGE		0x01d6
#define mmBIF_SDMA2_DOORBELL_RANGE_BASE_IDX	2
#define mmBIF_SDMA3_DOORBELL_RANGE		0x01d7
#define mmBIF_SDMA3_DOORBELL_RANGE_BASE_IDX	2

#define mmBIF_MMSCH1_DOORBELL_RANGE		0x01d8
#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX	2

static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
		     adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
		     adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}

static u32 nbio_v2_3_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp;

	/*
	 * A guest VM gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0,
	 * therefore we force rev_id to 0 (which is the default value).
	 */
	if (amdgpu_sriov_vf(adev))
		return 0;

	tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

	return tmp;
}

static void nbio_v2_3_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
			     BIF_FB_EN__FB_READ_EN_MASK |
			     BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}

static void nbio_v2_3_hdp_flush(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
	else
		amdgpu_ring_emit_wreg(ring,
				      (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
				      0);
}

static u32 nbio_v2_3_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE);
}
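/*
 * Note: the BIF_SDMAn_DOORBELL_RANGE registers share the field layout of
 * BIF_SDMA0_DOORBELL_RANGE, which is why the REG_SET_FIELD() calls below use
 * the SDMA0 field names for every instance (the local #defines above only
 * add the SDMA2/SDMA3 register offsets, not separate field masks).  The same
 * pattern applies to MMSCH0/MMSCH1 in nbio_v2_3_vcn_doorbell_range().
 */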
static void nbio_v2_3_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
					  bool use_doorbell, int doorbell_index,
					  int doorbell_size)
{
	u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
		  instance == 1 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE) :
		  instance == 2 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA2_DOORBELL_RANGE) :
				  SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA3_DOORBELL_RANGE);

	u32 doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_SDMA0_DOORBELL_RANGE, OFFSET,
					       doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_SDMA0_DOORBELL_RANGE, SIZE,
					       doorbell_size);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_SDMA0_DOORBELL_RANGE, SIZE,
					       0);

	WREG32(reg, doorbell_range);
}

static void nbio_v2_3_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
					 int doorbell_index, int instance)
{
	u32 reg = instance ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE) :
			     SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);

	u32 doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, OFFSET,
					       doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 8);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}

static void nbio_v2_3_enable_doorbell_aperture(struct amdgpu_device *adev,
					       bool enable)
{
	WREG32_FIELD15(NBIO, 0, RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN,
		       enable ? 1 : 0);
}

static void nbio_v2_3_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
							bool enable)
{
	u32 tmp = 0;

	if (enable) {
		/*
		 * tmp is still 0 here, so each REG_SET_FIELD() returns just
		 * the encoded field and OR-ing the results merges all three.
		 */
		tmp = REG_SET_FIELD(tmp, BIF_BX_PF_DOORBELL_SELFRING_GPA_APER_CNTL,
				    DOORBELL_SELFRING_GPA_APER_EN, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF_DOORBELL_SELFRING_GPA_APER_CNTL,
				    DOORBELL_SELFRING_GPA_APER_MODE, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF_DOORBELL_SELFRING_GPA_APER_CNTL,
				    DOORBELL_SELFRING_GPA_APER_SIZE, 0);

		WREG32_SOC15(NBIO, 0, mmBIF_BX_PF_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
			     lower_32_bits(adev->doorbell.base));
		WREG32_SOC15(NBIO, 0, mmBIF_BX_PF_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
			     upper_32_bits(adev->doorbell.base));
	}

	WREG32_SOC15(NBIO, 0, mmBIF_BX_PF_DOORBELL_SELFRING_GPA_APER_CNTL,
		     tmp);
}

static void nbio_v2_3_ih_doorbell_range(struct amdgpu_device *adev,
					bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  BIF_IH_DOORBELL_RANGE, OFFSET,
						  doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  BIF_IH_DOORBELL_RANGE, SIZE,
						  2);
	} else
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  BIF_IH_DOORBELL_RANGE, SIZE,
						  0);

	WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}
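/*
 * Usage sketch (illustrative; the actual call sites live in the IH block
 * driver, e.g. navi10_ih.c, not in this file): during IH ring init the IH
 * code is expected to route its doorbell with something along the lines of
 *
 *	adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
 *					    adev->irq.ih.doorbell_index);
 *
 * after which writes to that doorbell offset land in the IH aperture.
 */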
static void nbio_v2_3_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);

	interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
	/*
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL,
				       IH_DUMMY_RD_OVERRIDE, 0);

	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL,
				       IH_REQ_NONSNOOP_EN, 0);

	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}

static void nbio_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnCPM_CONTROL);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) {
		data |= (CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
			 CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
	} else {
		data &= ~(CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
			  CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnCPM_CONTROL, data);
}

static void nbio_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CNTL2);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	} else {
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnPCIE_CNTL2, data);
}

static void nbio_v2_3_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int data;

	/* AMD_CG_SUPPORT_BIF_MGCG */
	data = RREG32_PCIE(smnCPM_CONTROL);
	if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(smnPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}

static u32 nbio_v2_3_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_GPU_HDP_FLUSH_REQ);
}

static u32 nbio_v2_3_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_GPU_HDP_FLUSH_DONE);
}

static u32 nbio_v2_3_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}

static u32 nbio_v2_3_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}
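/*
 * Per-client request/done bits for BIF_BX_PF_GPU_HDP_FLUSH_REQ/_DONE.  The
 * ring backends (GFX/SDMA code) are expected to write one of these masks to
 * the REQ register and then poll the DONE register until the same bit comes
 * back, confirming the HDP flush has completed for that client.
 */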
const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = {
	.ref_and_mask_cp0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};

static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CONFIG_CNTL);
	data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL,
			     CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
	data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL,
			     CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);

	if (def != data)
		WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
}

#define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT		0x00000000 // off by default, no gains over L1
#define NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT		0x00000009 // 1=1us, 9=1ms
#define NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT	0x0000000E // 4ms

static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
				  bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_LC_CNTL);

	if (enable) {
		/* Disable ASPM L0s/L1 first */
		data &= ~(PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK |
			  PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK);

		data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;

		if (pci_is_thunderbolt_attached(adev->pdev))
			data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
		else
			data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;

		data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	} else {
		/* Disable ASPM L1 */
		data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
		/* Disable ASPM TxL0s */
		data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
		/* Disable PMI-triggered entry to L1 */
		data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
	}

	if (def != data)
		WREG32_PCIE(smnPCIE_LC_CNTL, data);
}

const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
	.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbio_v2_3_get_pcie_index_offset,
	.get_pcie_data_offset = nbio_v2_3_get_pcie_data_offset,
	.get_rev_id = nbio_v2_3_get_rev_id,
	.mc_access_enable = nbio_v2_3_mc_access_enable,
	.hdp_flush = nbio_v2_3_hdp_flush,
	.get_memsize = nbio_v2_3_get_memsize,
	.sdma_doorbell_range = nbio_v2_3_sdma_doorbell_range,
	.vcn_doorbell_range = nbio_v2_3_vcn_doorbell_range,
	.enable_doorbell_aperture = nbio_v2_3_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v2_3_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v2_3_ih_doorbell_range,
	.update_medium_grain_clock_gating = nbio_v2_3_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v2_3_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v2_3_get_clockgating_state,
	.ih_control = nbio_v2_3_ih_control,
	.init_registers = nbio_v2_3_init_registers,
	.remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
	.enable_aspm = nbio_v2_3_enable_aspm,
};
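/*
 * Usage sketch (illustrative; the hookup lives in the SoC-level code, not in
 * this file): the Navi1x common init, e.g. nv.c, is expected to install
 * these callbacks during early init roughly as
 *
 *	adev->nbio.funcs = &nbio_v2_3_funcs;
 *	adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
 *
 * so that generic IP code can call adev->nbio.funcs->hdp_flush(),
 * ->sdma_doorbell_range() and friends without knowing the NBIO revision.
 */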