/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "hdp/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#define GFX9_NUM_GFX_RINGS	1
#define GFX9_MEC_HPD_SIZE	2048
#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET	0x00000000L
#define GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH	34

#define mmPWR_MISC_CNTL_STATUS					0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L

MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

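/*
 * "Golden" register settings: mask/value pairs that
 * soc15_program_register_sequence() applies on top of the hardware defaults
 * at init time.  golden_settings_gc_9_0 is common to the GFX9 parts; the
 * lists that follow layer chip-specific overrides on top of it.
 */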
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000)
};

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);

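/*
 * Apply the golden register lists for the detected ASIC, followed by the
 * settings shared by every GFX9 variant.
 */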
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg10,
						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case CHIP_VEGA12:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1,
						ARRAY_SIZE(golden_settings_gc_9_2_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1_vg12,
						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_1,
						ARRAY_SIZE(golden_settings_gc_9_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_1_rv1,
						ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	default:
		break;
	}

	soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
					(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}

static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

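/*
 * Basic ring liveness test: write a magic value to a scratch register
 * through a SET_UCONFIG_REG packet and poll until the CP has executed it.
 */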
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}

static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 16, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}
	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF) {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("ib test on ring %d failed\n", ring->idx);
		r = -EINVAL;
	}

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}


static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}

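/*
 * Fetch and validate the CP (PFP/ME/CE/MEC/MEC2) and RLC firmware images for
 * the detected ASIC, cache the header fields used later during CP/RLC setup
 * and, when the PSP loads firmware, register each image in
 * adev->firmware.ucode[].
 */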
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);


	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
				adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
				le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
				le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
		}

	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx9: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}
	return err;
}

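/*
 * Clear-state buffer (CSB) size in dwords: the PREAMBLE begin/end packets,
 * one CONTEXT_CONTROL packet, one SET_CONTEXT_REG packet per extent and the
 * trailing CLEAR_STATE packet, mirroring the layout written by
 * gfx_v9_0_get_csb_buffer() below.
 */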
static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

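/*
 * Program the RLC load-balancing-per-watt (LBPW) thresholds, counters and CU
 * masks.  Called from gfx_v9_0_rlc_init() on Raven; gfx_v9_0_enable_lbpw()
 * toggles the feature itself.
 */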
static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
	uint32_t data;

	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));

	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);

	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH */
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
	data &= 0x0000FFFF;
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

	/* set RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF */
	WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, 0xFFF);

	/* set RLC_LB_CNTL = 0x8000_0095, bit 31 is reserved,
	 * but used for RLC_LB_CNTL configuration */
	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
	mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
	WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}

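/*
 * Copy the jump tables of the five CP microcode images (CE, PFP, ME, MEC,
 * MEC2) back to back into the RLC cp_table BO.
 */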
static void rv_init_cp_jump_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me = 5;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 4) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}

static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}

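/*
 * Allocate and fill the RLC clear-state buffer; on Raven additionally set up
 * the CP jump-table BO and the LBPW registers.
 */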
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
	volatile u32 *dst_ptr;
	u32 dws;
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx9_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* clear state block */
		adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.clear_state_obj,
					      &adev->gfx.rlc.clear_state_gpu_addr,
					      (void **)&adev->gfx.rlc.cs_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
				r);
			gfx_v9_0_rlc_fini(adev);
			return r;
		}
		/* set up the cs buffer */
		dst_ptr = adev->gfx.rlc.cs_ptr;
		gfx_v9_0_get_csb_buffer(adev, dst_ptr);
		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}

	if (adev->asic_type == CHIP_RAVEN) {
		/* TODO: double check the cp_table_size for RV */
		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.cp_table_obj,
					      &adev->gfx.rlc.cp_table_gpu_addr,
					      (void **)&adev->gfx.rlc.cp_table_ptr);
		if (r) {
			dev_err(adev->dev,
				"(%d) failed to create cp table bo\n", r);
			gfx_v9_0_rlc_fini(adev);
			return r;
		}

		rv_init_cp_jump_table(adev);
		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

		gfx_v9_0_init_lbpw(adev);
	}

	return 0;
}

static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

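/*
 * Allocate the MEC HPD EOP buffer (one GFX9_MEC_HPD_SIZE slot per acquired
 * compute ring) and stage the MEC firmware in a GTT BO.
 */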
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;

	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}

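/*
 * Wave debug helpers: read per-wave state through the SQ indirect register
 * interface (SQ_IND_INDEX/SQ_IND_DATA).  Exposed via gfx_v9_0_gfx_funcs for
 * the wave dump paths.
 */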
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}

static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, 0,
		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t thread,
				     uint32_t start, uint32_t size,
				     uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_0_select_se_sh,
	.read_wave_data = &gfx_v9_0_read_wave_data,
	.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
};

static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_VEGA12:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
		DRM_INFO("fix gfx.config for vega12\n");
		break;
	case CHIP_RAVEN:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));
}

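/*
 * NGG (next-generation geometry) buffers: one VRAM BO per buffer type, sized
 * per shader engine.  A zero size from the module parameter falls back to
 * the default passed in by the caller.
 */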
static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
				   struct amdgpu_ngg_buf *ngg_buf,
				   int size_se,
				   int default_size_se)
{
	int r;

	if (size_se < 0) {
		dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
		return -EINVAL;
	}
	size_se = size_se ? size_se : default_size_se;

	ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
	r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				    &ngg_buf->bo,
				    &ngg_buf->gpu_addr,
				    NULL);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
		return r;
	}
	ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);

	return r;
}

static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < NGG_BUF_MAX; i++)
		amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
				      &adev->gfx.ngg.buf[i].gpu_addr,
				      NULL);

	memset(&adev->gfx.ngg.buf[0], 0,
			sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);

	adev->gfx.ngg.init = false;

	return 0;
}

static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_ngg || adev->gfx.ngg.init == true)
		return 0;

	/* GDS reserve memory: 64 bytes alignment */
	adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
	adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
	adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);

	/* Primitive Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
				    amdgpu_prim_buf_per_se,
				    64 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Primitive Buffer\n");
		goto err;
	}

	/* Position Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
				    amdgpu_pos_buf_per_se,
				    256 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Position Buffer\n");
		goto err;
	}

	/* Control Sideband */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
				    amdgpu_cntl_sb_buf_per_se,
				    256);
	if (r) {
		dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
		goto err;
	}

	/* Parameter Cache, not created by default */
	if (amdgpu_param_buf_per_se <= 0)
		goto out;

	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
				    amdgpu_param_buf_per_se,
				    512 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Parameter Cache\n");
		goto err;
	}

out:
	adev->gfx.ngg.init = true;
	return 0;
err:
	gfx_v9_0_ngg_fini(adev);
	return r;
}

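/*
 * Enable NGG: program the WD buffer size/base registers from the BOs created
 * in gfx_v9_0_ngg_init() and clear the GDS words reserved for NGG with a
 * DMA_DATA packet on the gfx ring.
 */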
static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	int r;
	u32 data, base;

	if (!amdgpu_ngg)
		return 0;

	/* Program buffer size */
	data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_POS].size >> 8);
	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);

	data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);

	/* Program buffer base address */
	base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);

	/* Clear GDS reserved memory */
	r = amdgpu_ring_alloc(ring, 17);
	if (r) {
		DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
				   (adev->gds.mem.total_size +
				    adev->gfx.ngg.gds_reserve_size) >>
				   AMDGPU_GDS_SHIFT);

	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
				PACKET3_DMA_DATA_DST_SEL(1) |
				PACKET3_DMA_DATA_SRC_SEL(2)));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
				adev->gfx.ngg.gds_reserve_size);

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);

	amdgpu_ring_commit(ring);

	return 0;
}

static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				      int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX9_MEC_HPD_SIZE);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;

	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;

	return 0;
}

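/*
 * sw_init: register the KIQ/EOP/privileged interrupt sources, load the
 * microcode, create the RLC, MEC and KIQ BOs, initialize the gfx and compute
 * rings and their MQDs, and reserve the GDS/GWS/OA partitions used by gfx.
 */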
static int gfx_v9_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id;
	struct amdgpu_ring *ring;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		adev->gfx.mec.num_mec = 2;
		break;
	default:
		adev->gfx.mec.num_mec = 1;
		break;
	}

	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	/* KIQ event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
	if (r)
		return r;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 184,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 185,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	gfx_v9_0_scratch_init(adev);

	r = gfx_v9_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load gfx firmware!\n");
		return r;
	}

	r = gfx_v9_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v9_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->ring_obj = NULL;
		if (!i)
			sprintf(ring->name, "gfx");
		else
			sprintf(ring->name, "gfx_%d", i);
		ring->use_doorbell = true;
		ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
		if (r)
			return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	ring_id = 0;
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v9_0_compute_ring_init(adev,
							       ring_id,
							       i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
	if (r) {
		DRM_ERROR("Failed to init KIQ BOs!\n");
		return r;
	}

	kiq = &adev->gfx.kiq;
	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
	if (r)
		return r;

	/* create MQD for all compute queues as well as KIQ for SRIOV case */
	r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
	if (r)
		return r;

	/* reserve GDS, GWS and OA resource for gfx */
	r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
				    &adev->gds.gds_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
				    &adev->gds.gws_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
				    &adev->gds.oa_gfx_bo, NULL, NULL);
	if (r)
		return r;

	adev->gfx.ce_ram_size = 0x8000;

	gfx_v9_0_gpu_early_init(adev);

	r = gfx_v9_0_ngg_init(adev);
	if (r)
		return r;

	return 0;
}


static int gfx_v9_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_compute_mqd_sw_fini(adev);
	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
	amdgpu_gfx_kiq_fini(adev);

	gfx_v9_0_mec_fini(adev);
	gfx_v9_0_ngg_fini(adev);
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
				&adev->gfx.rlc.clear_state_gpu_addr,
				(void **)&adev->gfx.rlc.cs_ptr);
	if (adev->asic_type == CHIP_RAVEN) {
		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
				&adev->gfx.rlc.cp_table_gpu_addr,
				(void **)&adev->gfx.rlc.cp_table_ptr);
	}
	gfx_v9_0_free_microcode(adev);

	return 0;
}


static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	/* TODO */
}

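/*
 * Steer subsequent GRBM register accesses to a given SE/SH/instance;
 * 0xffffffff selects broadcast for that field.  Callers serialize against
 * each other with grbm_idx_mutex.
 */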
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}

static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);

	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}

static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
					adev->gfx.config.max_sh_per_se;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v9_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);
}

#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)

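/*
 * Program SH_MEM_CONFIG/SH_MEM_BASES for the VMIDs reserved for compute
 * (FIRST_COMPUTE_VMID up to, but not including, LAST_COMPUTE_VMID).
 */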
#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)
static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	mutex_lock(&adev->srbm_mutex);
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v9_0_tiling_mode_table_init(adev);

	gfx_v9_0_setup_rb(adev);
	gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		if (i == 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
			WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
		} else {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
			tmp = adev->gmc.shared_aperture_start >> 48;
			WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
		}
	}
	soc15_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v9_0_init_compute_vmid(adev);

	mutex_lock(&adev->grbm_idx_mutex);
	/*
	 * making sure that the following register writes will be broadcast
	 * to all the shaders
	 */
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);

	WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
		     (adev->gfx.config.sc_prim_fifo_size_frontend <<
		      PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
		     (adev->gfx.config.sc_prim_fifo_size_backend <<
		      PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
		     (adev->gfx.config.sc_hiz_tile_fifo_size <<
		      PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
		     (adev->gfx.config.sc_earlyz_tile_fifo_size <<
		      PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
	mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
			if (k == adev->usec_timeout) {
				gfx_v9_0_select_se_sh(adev, 0xffffffff,
						      0xffffffff, 0xffffffff);
				mutex_unlock(&adev->grbm_idx_mutex);
				DRM_INFO("Timeout waiting for RLC serdes %u,%u\n",
					 i, j);
				return;
			}
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
	       RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
	       RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
	       RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}
static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
					       bool enable)
{
	u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);

	WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}

static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
{
	/* csib */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
	       adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
	       adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
	       adev->gfx.rlc.clear_state_size);
}
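/*
 * The RLC save/restore list shipped in the RLC firmware begins with
 * GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH direct register entries; the
 * remainder is a series of indirect entries, each terminated by an
 * 0xFFFFFFFF marker, as assumed by the parser below.  While scanning,
 * the parser records where each indirect entry starts and replaces every
 * indirect register offset with an index into a small table of unique
 * offsets, which is later loaded into the RLC_SRM_INDEX_CNTL registers.
 */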
static void gfx_v9_0_parse_ind_reg_list(int *register_list_format,
					int indirect_offset,
					int list_size,
					int *unique_indirect_regs,
					int *unique_indirect_reg_count,
					int max_indirect_reg_count,
					int *indirect_start_offsets,
					int *indirect_start_offsets_count,
					int max_indirect_start_offsets_count)
{
	int idx;
	bool new_entry = true;

	for (; indirect_offset < list_size; indirect_offset++) {

		if (new_entry) {
			new_entry = false;
			indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
			*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
			BUG_ON(*indirect_start_offsets_count >= max_indirect_start_offsets_count);
		}

		if (register_list_format[indirect_offset] == 0xFFFFFFFF) {
			new_entry = true;
			continue;
		}

		indirect_offset += 2;

		/* look for the matching index */
		for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
			if (unique_indirect_regs[idx] ==
			    register_list_format[indirect_offset])
				break;
		}

		if (idx >= *unique_indirect_reg_count) {
			unique_indirect_regs[*unique_indirect_reg_count] =
				register_list_format[indirect_offset];
			idx = *unique_indirect_reg_count;
			*unique_indirect_reg_count = *unique_indirect_reg_count + 1;
			BUG_ON(*unique_indirect_reg_count >= max_indirect_reg_count);
		}

		register_list_format[indirect_offset] = idx;
	}
}

static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
{
	int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
	int unique_indirect_reg_count = 0;

	int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
	int indirect_start_offsets_count = 0;

	int list_size = 0;
	int i = 0;
	u32 tmp = 0;

	u32 *register_list_format =
		kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
	if (!register_list_format)
		return -ENOMEM;
	memcpy(register_list_format, adev->gfx.rlc.register_list_format,
	       adev->gfx.rlc.reg_list_format_size_bytes);

	/* setup unique_indirect_regs array and indirect_start_offsets array */
	gfx_v9_0_parse_ind_reg_list(register_list_format,
				    GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH,
				    adev->gfx.rlc.reg_list_format_size_bytes >> 2,
				    unique_indirect_regs,
				    &unique_indirect_reg_count,
				    ARRAY_SIZE(unique_indirect_regs),
				    indirect_start_offsets,
				    &indirect_start_offsets_count,
				    ARRAY_SIZE(indirect_start_offsets));

	/* enable auto inc in case it is disabled */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);

	/* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
	       RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
		       adev->gfx.rlc.register_restore[i]);

	/* load direct register */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR), 0);
	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
		       adev->gfx.rlc.register_restore[i]);

	/* load indirect register */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
	       adev->gfx.rlc.reg_list_format_start);
	for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
		       register_list_format[i]);

	/* set save/restore list size */
	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
	list_size = list_size >> 1;
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
	       adev->gfx.rlc.reg_restore_list_size);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);

	/* write the starting offsets to RLC scratch ram */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
	       adev->gfx.rlc.starting_offsets_start);
	for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
		       indirect_start_offsets[i]);

	/* load unique indirect regs */
	for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i,
		       unique_indirect_regs[i] & 0x3FFFF);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i,
		       unique_indirect_regs[i] >> 20);
	}

	kfree(register_list_format);
	return 0;
}

static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
{
	WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
}
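/*
 * Hand control of coarse grain power gating (CGPG) over to the GFX IP.
 * The PWR_MISC_CNTL_STATUS encoding used here (CGPG enable in bit 0,
 * a two-bit GFXOFF status field) follows the register definitions at
 * the top of this file; each write is skipped when the register already
 * holds the desired value.
 */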
static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t data = 0;
	uint32_t default_data = 0;

	default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
	if (enable) {
		/* enable GFXIP control over CGPG */
		data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
		if (default_data != data)
			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);

		/* update status */
		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
		data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
		if (default_data != data)
			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
	} else {
		/* restore GFXIP control over CGPG */
		data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
		if (default_data != data)
			WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
	}
}

static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
			      AMD_PG_SUPPORT_GFX_SMG |
			      AMD_PG_SUPPORT_GFX_DMG)) {
		/* init IDLE_POLL_COUNT = 60 */
		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
		data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
		data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);

		/* init RLC PG Delay */
		data = 0;
		data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
		data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
		data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
		data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);

		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
		data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
		data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);

		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
		data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
		data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);

		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
		data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;

		/* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
		data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);

		pwr_10_0_gfxip_control_over_cgpg(adev, true);
	}
}
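/*
 * The helpers below each toggle a single field in RLC_PG_CNTL and only
 * write the register back when the value actually changes, which avoids
 * redundant MMIO traffic on the common enable/disable paths.
 */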
static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t data = 0;
	uint32_t default_data = 0;

	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
	data = REG_SET_FIELD(data, RLC_PG_CNTL,
			     SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
			     enable ? 1 : 0);
	if (default_data != data)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}

static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t data = 0;
	uint32_t default_data = 0;

	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
	data = REG_SET_FIELD(data, RLC_PG_CNTL,
			     SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
			     enable ? 1 : 0);
	if (default_data != data)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}

static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
					    bool enable)
{
	uint32_t data = 0;
	uint32_t default_data = 0;

	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
	data = REG_SET_FIELD(data, RLC_PG_CNTL,
			     CP_PG_DISABLE,
			     enable ? 0 : 1);
	if (default_data != data)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}

static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
						bool enable)
{
	uint32_t data, default_data;

	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
	data = REG_SET_FIELD(data, RLC_PG_CNTL,
			     GFX_POWER_GATING_ENABLE,
			     enable ? 1 : 0);
	if (default_data != data)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}

static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data, default_data;

	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
	data = REG_SET_FIELD(data, RLC_PG_CNTL,
			     GFX_PIPELINE_PG_ENABLE,
			     enable ? 1 : 0);
	if (default_data != data)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);

	if (!enable)
		/* read any GFX register to wake up GFX */
		data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
}

static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t data, default_data;

	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
	data = REG_SET_FIELD(data, RLC_PG_CNTL,
			     STATIC_PER_CU_PG_ENABLE,
			     enable ? 1 : 0);
	if (default_data != data)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}

static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
							 bool enable)
{
	uint32_t data, default_data;

	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
	data = REG_SET_FIELD(data, RLC_PG_CNTL,
			     DYN_PER_CU_PG_ENABLE,
			     enable ? 1 : 0);
	if (default_data != data)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}

static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
{
	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
			      AMD_PG_SUPPORT_GFX_SMG |
			      AMD_PG_SUPPORT_GFX_DMG |
			      AMD_PG_SUPPORT_CP |
			      AMD_PG_SUPPORT_GDS |
			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
		gfx_v9_0_init_csb(adev);
		gfx_v9_0_init_rlc_save_restore_list(adev);
		gfx_v9_0_enable_save_restore_machine(adev);

		if (adev->asic_type == CHIP_RAVEN) {
			WREG32(mmRLC_JUMP_TABLE_RESTORE,
			       adev->gfx.rlc.cp_table_gpu_addr >> 8);
			gfx_v9_0_init_gfx_power_gating(adev);

			if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
				gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
				gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
			} else {
				gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
				gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
			}

			if (adev->pg_flags & AMD_PG_SUPPORT_CP)
				gfx_v9_0_enable_cp_power_gating(adev, true);
			else
				gfx_v9_0_enable_cp_power_gating(adev, false);
		}
	}
}

void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
{
	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
	gfx_v9_0_enable_gui_idle_interrupt(adev, false);
	gfx_v9_0_wait_for_rlc_serdes(adev);
}

static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
{
	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
	udelay(50);
}
static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
{
#ifdef AMDGPU_RLC_DEBUG_RETRY
	u32 rlc_ucode_ver;
#endif

	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);

	/* on APUs, the GUI idle interrupt is enabled later, after CP init */
	if (!(adev->flags & AMD_IS_APU))
		gfx_v9_0_enable_gui_idle_interrupt(adev, true);

	udelay(50);

#ifdef AMDGPU_RLC_DEBUG_RETRY
	/* RLC_GPM_GENERAL_6 : RLC Ucode version */
	rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
	if (rlc_ucode_ver == 0x108) {
		DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
			 rlc_ucode_ver, adev->gfx.rlc_fw_version);
		/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
		 * default is 0x9C4 to create a 100us interval */
		WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
		/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
		 * to disable the page fault retry interrupts, default is
		 * 0x100 (256) */
		WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
	}
#endif
}

static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
				   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
		     RLCG_UCODE_LOADING_START_ADDRESS);
	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);

	return 0;
}

static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_sriov_vf(adev)) {
		gfx_v9_0_init_csb(adev);
		return 0;
	}

	gfx_v9_0_rlc_stop(adev);

	/* disable CG */
	WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);

	/* disable PG */
	WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);

	gfx_v9_0_rlc_reset(adev);

	gfx_v9_0_init_pg(adev);

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		/* legacy rlc firmware loading */
		r = gfx_v9_0_rlc_load_microcode(adev);
		if (r)
			return r;
	}

	if (adev->asic_type == CHIP_RAVEN) {
		if (amdgpu_lbpw != 0)
			gfx_v9_0_enable_lbpw(adev, true);
		else
			gfx_v9_0_enable_lbpw(adev, false);
	}

	gfx_v9_0_rlc_start(adev);

	return 0;
}
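/*
 * The gfx CP front end consists of three microengines: the prefetch
 * parser (PFP), the constant engine (CE) and the micro engine (ME).
 * They are halted and released together; while halted, the gfx rings
 * are marked not ready so nothing tries to submit to them.
 */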
static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	int i;
	u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);

	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
	if (!enable) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			adev->gfx.gfx_ring[i].ready = false;
	}
	WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
	udelay(50);
}

static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *pfp_hdr;
	const struct gfx_firmware_header_v1_0 *ce_hdr;
	const struct gfx_firmware_header_v1_0 *me_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
		return -EINVAL;

	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.pfp_fw->data;
	ce_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.ce_fw->data;
	me_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.me_fw->data;

	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);

	gfx_v9_0_cp_gfx_enable(adev, false);

	/* PFP */
	fw_data = (const __le32 *)
		(adev->gfx.pfp_fw->data +
		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);

	/* CE */
	fw_data = (const __le32 *)
		(adev->gfx.ce_fw->data +
		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);

	/* ME */
	fw_data = (const __le32 *)
		(adev->gfx.me_fw->data +
		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
	WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);

	return 0;
}
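/*
 * Prime the gfx ring with the clear state ("golden" context register
 * values): PREAMBLE begin, the SET_CONTEXT_REG extents from
 * gfx9_cs_data, PREAMBLE end, then a CLEAR_STATE packet and the CE
 * partition bases.  The ring allocation below reserves the CSB size
 * plus the handful of trailing packets emitted after the loop.
 */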
static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int r, i, tmp;

	/* init the CP */
	WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
	WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);

	gfx_v9_0_cp_gfx_enable(adev, true);

	r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, 0x80000000);
	amdgpu_ring_write(ring, 0x80000000);

	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				amdgpu_ring_write(ring,
						  PACKET3(PACKET3_SET_CONTEXT_REG,
							  ext->reg_count));
				amdgpu_ring_write(ring,
						  ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					amdgpu_ring_write(ring, ext->extent[i]);
			}
		}
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
	amdgpu_ring_write(ring, 0x8000);
	amdgpu_ring_write(ring, 0x8000);

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
	       (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_commit(ring);

	return 0;
}

static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr, rptr_addr, wptr_gpu_addr;

	/* Set the write pointer delay */
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);

	/* Set ring buffer size */
	ring = &adev->gfx.gfx_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
#endif
	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's write pointers */
	ring->wptr = 0;
	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));

	/* set the wb address whether it's enabled or not */
	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
		     CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);

	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));

	mdelay(1);
	WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
	WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));

	tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
	}
	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);

	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
			    DOORBELL_RANGE_LOWER, ring->doorbell_index);
	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);

	WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
		     CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);

	/* start the ring */
	gfx_v9_0_cp_gfx_start(adev);
	ring->ready = true;

	return 0;
}
static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
	int i;

	if (enable) {
		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
	} else {
		WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
			     (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
			      CP_MEC_CNTL__MEC_ME2_HALT_MASK));
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			adev->gfx.compute_ring[i].ready = false;
		adev->gfx.kiq.ring.ready = false;
	}
	udelay(50);
}

static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *mec_hdr;
	const __le32 *fw_data;
	unsigned i;
	u32 tmp;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	gfx_v9_0_cp_compute_enable(adev, false);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);

	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
		     adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
	WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
		     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));

	/* MEC1 */
	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
		     mec_hdr->jt_offset);
	for (i = 0; i < mec_hdr->jt_size; i++)
		WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
			     le32_to_cpup(fw_data + mec_hdr->jt_offset + i));

	WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
		     adev->gfx.mec_fw_version);
	/* Todo : Loading MEC2 firmware is only necessary if MEC2 should run
	 * different microcode than MEC1. */

	return 0;
}

/* KIQ functions */
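/*
 * The kernel interface queue (KIQ) is a privileged compute queue owned
 * by the driver.  Instead of programming every user compute queue (KCQ)
 * through MMIO, the driver initializes the KIQ once and then submits
 * MAP_QUEUES / UNMAP_QUEUES packets to it, letting CP firmware attach
 * and detach the remaining queues from their MQDs.
 */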
static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which is KIQ queue */
	tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
	tmp |= 0x80;
	WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
}

static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
{
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	uint32_t scratch, tmp = 0;
	uint64_t queue_mask = 0;
	int r, i;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating */
		if (WARN_ON(i >= (sizeof(queue_mask) * 8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << i);
	}

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("Failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);

	r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 11);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}

	/* set resources */
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
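	/*
	 * Map each KCQ: CP firmware needs the doorbell offset to route
	 * rings, the MQD GPU address to load the queue state, and the
	 * wptr poll address to pick up writes that bypass the doorbell.
	 */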
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
		uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
		uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);

		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
		/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1 */
		amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
				  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
				  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
				  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
				  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
				  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
				  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
				  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
				  PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
				  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
		amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
		amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
		amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
	}
	/* write to scratch for completion */
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
	amdgpu_ring_commit(kiq_ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i >= adev->usec_timeout) {
		DRM_ERROR("KCQ enable failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);

	return r;
}
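/*
 * Fill in the memory queue descriptor (MQD).  The MQD is a persistent,
 * CPU-initialized image of the hardware queue registers; the CP (for
 * KCQs) or the driver (for the KIQ) later loads it into an HQD slot,
 * so everything written here mirrors a CP_HQD_* register.
 */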
static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
	uint32_t tmp;

	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000003;

	mqd->dynamic_cu_mask_addr_lo =
		lower_32_bits(ring->mqd_gpu_addr
			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
	mqd->dynamic_cu_mask_addr_hi =
		upper_32_bits(ring->mqd_gpu_addr
			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));

	eop_base_addr = ring->eop_gpu_addr >> 8;
	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			    (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));

	mqd->cp_hqd_eop_control = tmp;

	/* enable doorbell? */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);

	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* disable the queue if it's active */
	ring->wptr = 0;
	mqd->cp_hqd_dequeue_request = 0;
	mqd->cp_hqd_pq_rptr = 0;
	mqd->cp_hqd_pq_wptr_lo = 0;
	mqd->cp_hqd_pq_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);

	/* set MQD vmid to 0 */
	tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
	mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(ring->ring_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			    ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	mqd->cp_hqd_pq_control = tmp;

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	tmp = 0;
	/* enable the doorbell if requested */
	if (ring->use_doorbell) {
		tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);

		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	ring->wptr = 0;
	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);

	/* set the vmid for the queue */
	mqd->cp_hqd_vmid = 0;

	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
	mqd->cp_hqd_persistent_state = tmp;

	/* set MIN_IB_AVAIL_SIZE */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
	mqd->cp_hqd_ib_control = tmp;

	/* activate the queue */
	mqd->cp_hqd_active = 1;

	return 0;
}
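/*
 * Unlike the KCQs, which are mapped through KIQ packets, the KIQ itself
 * has to be brought up by writing its MQD image straight into the HQD
 * registers over MMIO, under srbm_mutex with the KIQ's me/pipe/queue
 * selected; no queue exists yet that could do the mapping for us.
 */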
static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	int j;

	/* disable wptr polling */
	WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);

	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
		     mqd->cp_hqd_eop_base_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
		     mqd->cp_hqd_eop_base_addr_hi);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
		     mqd->cp_hqd_eop_control);

	/* enable doorbell? */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
		     mqd->cp_hqd_pq_doorbell_control);

	/* disable the queue if it's active */
	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
		for (j = 0; j < adev->usec_timeout; j++) {
			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
			     mqd->cp_hqd_dequeue_request);
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
			     mqd->cp_hqd_pq_rptr);
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
			     mqd->cp_hqd_pq_wptr_lo);
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
			     mqd->cp_hqd_pq_wptr_hi);
	}

	/* set the pointer to the MQD */
	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
		     mqd->cp_mqd_base_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
		     mqd->cp_mqd_base_addr_hi);

	/* set MQD vmid to 0 */
	WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
		     mqd->cp_mqd_control);

	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
		     mqd->cp_hqd_pq_base_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
		     mqd->cp_hqd_pq_base_hi);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
		     mqd->cp_hqd_pq_control);

	/* set the wb address whether it's enabled or not */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
		     mqd->cp_hqd_pq_rptr_report_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
		     mqd->cp_hqd_pq_rptr_report_addr_hi);

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
		     mqd->cp_hqd_pq_wptr_poll_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
		     mqd->cp_hqd_pq_wptr_poll_addr_hi);

	/* enable the doorbell if requested */
	if (ring->use_doorbell) {
		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
			     (AMDGPU_DOORBELL64_KIQ * 2) << 2);
		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
			     (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
	}

	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
		     mqd->cp_hqd_pq_doorbell_control);

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
		     mqd->cp_hqd_pq_wptr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
		     mqd->cp_hqd_pq_wptr_hi);

	/* set the vmid for the queue */
	WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);

	WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
		     mqd->cp_hqd_persistent_state);

	/* activate the queue */
	WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
		     mqd->cp_hqd_active);

	if (ring->use_doorbell)
		WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);

	return 0;
}

static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;

	gfx_v9_0_kiq_setting(ring);

	if (adev->in_gpu_reset) { /* for GPU_RESET case */
		/* reset MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx],
			       sizeof(struct v9_mqd_allocation));

		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);

		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v9_0_kiq_init_register(ring);
		soc15_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else {
		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v9_0_mqd_init(ring);
		gfx_v9_0_kiq_init_register(ring);
		soc15_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd,
			       sizeof(struct v9_mqd_allocation));
	}

	return 0;
}

static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v9_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = ring - &adev->gfx.compute_ring[0];

	if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
		mutex_lock(&adev->srbm_mutex);
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v9_0_mqd_init(ring);
		soc15_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd,
			       sizeof(struct v9_mqd_allocation));
	} else if (adev->in_gpu_reset) { /* for GPU_RESET case */
		/* reset MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx],
			       sizeof(struct v9_mqd_allocation));

		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);
	} else {
		amdgpu_ring_clear_ring(ring);
	}

	return 0;
}
static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int r = 0, i;

	gfx_v9_0_cp_compute_enable(adev, true);

	ring = &adev->gfx.kiq.ring;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		goto done;

	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (!r) {
		r = gfx_v9_0_kiq_init_queue(ring);
		amdgpu_bo_kunmap(ring->mqd_obj);
		ring->mqd_ptr = NULL;
	}
	amdgpu_bo_unreserve(ring->mqd_obj);
	if (r)
		goto done;

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			goto done;
		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
		if (!r) {
			r = gfx_v9_0_kcq_init_queue(ring);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
		if (r)
			goto done;
	}

	r = gfx_v9_0_kiq_kcq_enable(adev);
done:
	return r;
}

static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *ring;

	if (!(adev->flags & AMD_IS_APU))
		gfx_v9_0_enable_gui_idle_interrupt(adev, false);

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		/* legacy firmware loading */
		r = gfx_v9_0_cp_gfx_load_microcode(adev);
		if (r)
			return r;

		r = gfx_v9_0_cp_compute_load_microcode(adev);
		if (r)
			return r;
	}

	r = gfx_v9_0_cp_gfx_resume(adev);
	if (r)
		return r;

	r = gfx_v9_0_kiq_resume(adev);
	if (r)
		return r;

	ring = &adev->gfx.gfx_ring[0];
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	ring = &adev->gfx.kiq.ring;
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r)
		ring->ready = false;

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];

		ring->ready = true;
		r = amdgpu_ring_test_ring(ring);
		if (r)
			ring->ready = false;
	}

	gfx_v9_0_enable_gui_idle_interrupt(adev, true);

	return 0;
}

static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
	gfx_v9_0_cp_gfx_enable(adev, enable);
	gfx_v9_0_cp_compute_enable(adev, enable);
}

static int gfx_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfx_v9_0_init_golden_registers(adev);

	gfx_v9_0_gpu_init(adev);

	r = gfx_v9_0_rlc_resume(adev);
	if (r)
		return r;

	r = gfx_v9_0_cp_resume(adev);
	if (r)
		return r;

	r = gfx_v9_0_ngg_en(adev);
	if (r)
		return r;

	return r;
}
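/*
 * Tear a KCQ down through the KIQ: an UNMAP_QUEUES packet with the
 * RESET_QUEUES action detaches the queue identified by its doorbell
 * offset, and a scratch register write-back is polled to confirm that
 * the CP actually consumed the packet.
 */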
static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring,
				struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint32_t scratch, tmp = 0;
	int r, i;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("Failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);

	r = amdgpu_ring_alloc(kiq_ring, 10);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}

	/* unmap queues */
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, 0);
	amdgpu_ring_write(kiq_ring, 0);
	amdgpu_ring_write(kiq_ring, 0);
	/* write to scratch for completion */
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
	amdgpu_ring_commit(kiq_ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i >= adev->usec_timeout) {
		DRM_ERROR("KCQ disable failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}

static int gfx_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);

	/* disable KCQs so the CPC stops touching memory that is about to go away */
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);

	if (amdgpu_sriov_vf(adev)) {
		gfx_v9_0_cp_gfx_enable(adev, false);
		/* must disable polling for SRIOV when hw finished, otherwise
		 * CPC engine may still keep fetching WB address which is already
		 * invalid after sw finished and trigger DMAR reading error in
		 * hypervisor side.
		 */
		WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
		return 0;
	}
	gfx_v9_0_cp_enable(adev, false);
	gfx_v9_0_rlc_stop(adev);

	return 0;
}

static int gfx_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.in_suspend = true;
	return gfx_v9_0_hw_fini(adev);
}

static int gfx_v9_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = gfx_v9_0_hw_init(adev);
	adev->gfx.in_suspend = false;
	return r;
}

static bool gfx_v9_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
			  GRBM_STATUS, GUI_ACTIVE))
		return false;
	else
		return true;
}

static int gfx_v9_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (gfx_v9_0_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
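/*
 * Build a GRBM soft reset mask from the busy bits in GRBM_STATUS /
 * GRBM_STATUS2: shader and fixed-function activity requests a CP + GFX
 * reset, CP activity alone a CP reset, and a busy RLC an RLC reset.
 * The mask is pulsed through GRBM_SOFT_RESET with the RLC and both CP
 * engines halted first.
 */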
static int gfx_v9_0_soft_reset(void *handle)
{
	u32 grbm_soft_reset = 0;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* GRBM_STATUS */
	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
	}

	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
	}

	/* GRBM_STATUS2 */
	tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);

	if (grbm_soft_reset) {
		/* stop the rlc */
		gfx_v9_0_rlc_stop(adev);

		/* Disable GFX parsing/prefetching */
		gfx_v9_0_cp_gfx_enable(adev, false);

		/* Disable MEC parsing/prefetching */
		gfx_v9_0_cp_compute_enable(adev, false);

		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}
	return 0;
}

static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);
	return clock;
}
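/*
 * Program the per-VMID GDS/GWS/OA partitions from the ring.  The OA
 * (ordered append) allocation is written as a contiguous bitmask:
 * (1 << (oa_size + oa_base)) - (1 << oa_base) sets oa_size consecutive
 * bits starting at oa_base, e.g. base 4 and size 2 give 0x30.
 */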
static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
					  uint32_t vmid,
					  uint32_t gds_base, uint32_t gds_size,
					  uint32_t gws_base, uint32_t gws_size,
					  uint32_t oa_base, uint32_t oa_size)
{
	struct amdgpu_device *adev = ring->adev;

	gds_base = gds_base >> AMDGPU_GDS_SHIFT;
	gds_size = gds_size >> AMDGPU_GDS_SHIFT;

	gws_base = gws_base >> AMDGPU_GWS_SHIFT;
	gws_size = gws_size >> AMDGPU_GWS_SHIFT;

	oa_base = oa_base >> AMDGPU_OA_SHIFT;
	oa_size = oa_size >> AMDGPU_OA_SHIFT;

	/* GDS Base */
	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
				   gds_base);

	/* GDS Size */
	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
				   gds_size);

	/* GWS */
	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
				   gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

	/* OA */
	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
				   (1 << (oa_size + oa_base)) - (1 << oa_base));
}

static int gfx_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
	gfx_v9_0_set_ring_funcs(adev);
	gfx_v9_0_set_irq_funcs(adev);
	gfx_v9_0_set_gds_init(adev);
	gfx_v9_0_set_rlc_funcs(adev);

	return 0;
}

static int gfx_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	return 0;
}

static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
{
	uint32_t rlc_setting, data;
	unsigned i;

	if (adev->gfx.rlc.in_safe_mode)
		return;

	/* if RLC is not enabled, do nothing */
	rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		data = RLC_SAFE_MODE__CMD_MASK;
		data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
		WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);

		/* wait for RLC_SAFE_MODE; poll the register value, not its offset */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE),
					   RLC_SAFE_MODE, CMD))
				break;
			udelay(1);
		}
		adev->gfx.rlc.in_safe_mode = true;
	}
}

static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
{
	uint32_t rlc_setting, data;

	if (!adev->gfx.rlc.in_safe_mode)
		return;

	/* if RLC is not enabled, do nothing */
	rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
		/*
		 * Try to exit safe mode only if it is already in safe
		 * mode.
		 */
		data = RLC_SAFE_MODE__CMD_MASK;
		WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
		adev->gfx.rlc.in_safe_mode = false;
	}
}

static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
						bool enable)
{
	/* TODO: double check if we need to perform under safe mode */
	/* gfx_v9_0_enter_rlc_safe_mode(adev); */

	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
		gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
			gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
	} else {
		gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
		gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
	}

	/* gfx_v9_0_exit_rlc_safe_mode(adev); */
}

static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
						bool enable)
{
	/* TODO: double check if we need to perform under safe mode */
	/* gfx_v9_0_enter_rlc_safe_mode(adev); */

	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
	else
		gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);

	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
	else
		gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);

	/* gfx_v9_0_exit_rlc_safe_mode(adev); */
}
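/*
 * In RLC_CGTT_MGCG_OVERRIDE a set bit *overrides* (i.e. disables) the
 * corresponding clock gating feature, so the enable path below clears
 * bits and the disable path sets them.  Light sleep (MGLS) for the RLC
 * and CP memories is only touched when the global MGLS flag is set.
 */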
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); 3330 3331 /* only for Vega10 & Raven1 */ 3332 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK; 3333 3334 if (def != data) 3335 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); 3336 3337 /* MGLS is a global flag to control all MGLS in GFX */ 3338 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { 3339 /* 2 - RLC memory Light sleep */ 3340 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) { 3341 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); 3342 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; 3343 if (def != data) 3344 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data); 3345 } 3346 /* 3 - CP memory Light sleep */ 3347 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { 3348 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); 3349 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; 3350 if (def != data) 3351 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); 3352 } 3353 } 3354 } else { 3355 /* 1 - MGCG_OVERRIDE */ 3356 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 3357 data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK | 3358 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 3359 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 3360 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | 3361 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); 3362 if (def != data) 3363 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); 3364 3365 /* 2 - disable MGLS in RLC */ 3366 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); 3367 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) { 3368 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; 3369 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data); 3370 } 3371 3372 /* 3 - disable MGLS in CP */ 3373 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); 3374 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) { 3375 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; 3376 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); 3377 } 3378 } 3379 } 3380 3381 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev, 3382 bool enable) 3383 { 3384 uint32_t data, def; 3385 3386 adev->gfx.rlc.funcs->enter_safe_mode(adev); 3387 3388 /* Enable 3D CGCG/CGLS */ 3389 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) { 3390 /* write cmd to clear cgcg/cgls ov */ 3391 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 3392 /* unset CGCG override */ 3393 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK; 3394 /* update CGCG and CGLS override bits */ 3395 if (def != data) 3396 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); 3397 /* enable 3Dcgcg FSM(0x0020003f) */ 3398 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); 3399 data = (0x2000 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 3400 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 3401 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 3402 data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 3403 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 3404 if (def != data) 3405 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data); 3406 3407 /* set IDLE_POLL_COUNT(0x00900100) */ 3408 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL); 3409 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 3410 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 3411 if (def != data) 3412 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data); 3413 } else { 3414 /* Disable CGCG/CGLS */ 3415 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); 3416 /* disable cgcg, cgls should be disabled */ 3417 data &= 
~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK | 3418 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK); 3419 /* disable cgcg and cgls in FSM */ 3420 if (def != data) 3421 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data); 3422 } 3423 3424 adev->gfx.rlc.funcs->exit_safe_mode(adev); 3425 } 3426 3427 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, 3428 bool enable) 3429 { 3430 uint32_t def, data; 3431 3432 adev->gfx.rlc.funcs->enter_safe_mode(adev); 3433 3434 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { 3435 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 3436 /* unset CGCG override */ 3437 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; 3438 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 3439 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 3440 else 3441 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 3442 /* update CGCG and CGLS override bits */ 3443 if (def != data) 3444 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); 3445 3446 /* enable cgcg FSM(0x0020003F) */ 3447 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); 3448 data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 3449 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 3450 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 3451 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 3452 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 3453 if (def != data) 3454 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data); 3455 3456 /* set IDLE_POLL_COUNT(0x00900100) */ 3457 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL); 3458 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 3459 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 3460 if (def != data) 3461 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data); 3462 } else { 3463 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); 3464 /* reset CGCG/CGLS bits */ 3465 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); 3466 /* disable cgcg and cgls in FSM */ 3467 if (def != data) 3468 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data); 3469 } 3470 3471 adev->gfx.rlc.funcs->exit_safe_mode(adev); 3472 } 3473 3474 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, 3475 bool enable) 3476 { 3477 if (enable) { 3478 /* CGCG/CGLS should be enabled after MGCG/MGLS 3479 * === MGCG + MGLS === 3480 */ 3481 gfx_v9_0_update_medium_grain_clock_gating(adev, enable); 3482 /* === CGCG /CGLS for GFX 3D Only === */ 3483 gfx_v9_0_update_3d_clock_gating(adev, enable); 3484 /* === CGCG + CGLS === */ 3485 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable); 3486 } else { 3487 /* CGCG/CGLS should be disabled before MGCG/MGLS 3488 * === CGCG + CGLS === 3489 */ 3490 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable); 3491 /* === CGCG /CGLS for GFX 3D Only === */ 3492 gfx_v9_0_update_3d_clock_gating(adev, enable); 3493 /* === MGCG + MGLS === */ 3494 gfx_v9_0_update_medium_grain_clock_gating(adev, enable); 3495 } 3496 return 0; 3497 } 3498 3499 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { 3500 .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode, 3501 .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode 3502 }; 3503 3504 static int gfx_v9_0_set_powergating_state(void *handle, 3505 enum amd_powergating_state state) 3506 { 3507 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3508 bool enable = (state == AMD_PG_STATE_GATE) ? 
true : false; 3509 3510 switch (adev->asic_type) { 3511 case CHIP_RAVEN: 3512 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) { 3513 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true); 3514 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true); 3515 } else { 3516 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false); 3517 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false); 3518 } 3519 3520 if (adev->pg_flags & AMD_PG_SUPPORT_CP) 3521 gfx_v9_0_enable_cp_power_gating(adev, true); 3522 else 3523 gfx_v9_0_enable_cp_power_gating(adev, false); 3524 3525 /* update gfx cgpg state */ 3526 gfx_v9_0_update_gfx_cg_power_gating(adev, enable); 3527 3528 /* update mgcg state */ 3529 gfx_v9_0_update_gfx_mg_power_gating(adev, enable); 3530 break; 3531 default: 3532 break; 3533 } 3534 3535 return 0; 3536 } 3537 3538 static int gfx_v9_0_set_clockgating_state(void *handle, 3539 enum amd_clockgating_state state) 3540 { 3541 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3542 3543 if (amdgpu_sriov_vf(adev)) 3544 return 0; 3545 3546 switch (adev->asic_type) { 3547 case CHIP_VEGA10: 3548 case CHIP_VEGA12: 3549 case CHIP_RAVEN: 3550 gfx_v9_0_update_gfx_clock_gating(adev, 3551 state == AMD_CG_STATE_GATE ? true : false); 3552 break; 3553 default: 3554 break; 3555 } 3556 return 0; 3557 } 3558 3559 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags) 3560 { 3561 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3562 int data; 3563 3564 if (amdgpu_sriov_vf(adev)) 3565 *flags = 0; 3566 3567 /* AMD_CG_SUPPORT_GFX_MGCG */ 3568 data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 3569 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) 3570 *flags |= AMD_CG_SUPPORT_GFX_MGCG; 3571 3572 /* AMD_CG_SUPPORT_GFX_CGCG */ 3573 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); 3574 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) 3575 *flags |= AMD_CG_SUPPORT_GFX_CGCG; 3576 3577 /* AMD_CG_SUPPORT_GFX_CGLS */ 3578 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) 3579 *flags |= AMD_CG_SUPPORT_GFX_CGLS; 3580 3581 /* AMD_CG_SUPPORT_GFX_RLC_LS */ 3582 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); 3583 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) 3584 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS; 3585 3586 /* AMD_CG_SUPPORT_GFX_CP_LS */ 3587 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); 3588 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) 3589 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; 3590 3591 /* AMD_CG_SUPPORT_GFX_3D_CGCG */ 3592 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); 3593 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) 3594 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; 3595 3596 /* AMD_CG_SUPPORT_GFX_3D_CGLS */ 3597 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK) 3598 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS; 3599 } 3600 3601 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) 3602 { 3603 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr*/ 3604 } 3605 3606 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) 3607 { 3608 struct amdgpu_device *adev = ring->adev; 3609 u64 wptr; 3610 3611 /* XXX check if swapping is necessary on BE */ 3612 if (ring->use_doorbell) { 3613 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]); 3614 } else { 3615 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR); 3616 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32; 3617 } 3618 3619 return wptr; 3620 } 3621 3622 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) 3623 { 3624 struct amdgpu_device 
*adev = ring->adev; 3625 3626 if (ring->use_doorbell) { 3627 /* XXX check if swapping is necessary on BE */ 3628 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr); 3629 WDOORBELL64(ring->doorbell_index, ring->wptr); 3630 } else { 3631 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); 3632 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); 3633 } 3634 } 3635 3636 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) 3637 { 3638 struct amdgpu_device *adev = ring->adev; 3639 u32 ref_and_mask, reg_mem_engine; 3640 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; 3641 3642 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 3643 switch (ring->me) { 3644 case 1: 3645 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; 3646 break; 3647 case 2: 3648 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; 3649 break; 3650 default: 3651 return; 3652 } 3653 reg_mem_engine = 0; 3654 } else { 3655 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0; 3656 reg_mem_engine = 1; /* pfp */ 3657 } 3658 3659 gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, 3660 adev->nbio_funcs->get_hdp_flush_req_offset(adev), 3661 adev->nbio_funcs->get_hdp_flush_done_offset(adev), 3662 ref_and_mask, ref_and_mask, 0x20); 3663 } 3664 3665 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 3666 struct amdgpu_ib *ib, 3667 unsigned vmid, bool ctx_switch) 3668 { 3669 u32 header, control = 0; 3670 3671 if (ib->flags & AMDGPU_IB_FLAG_CE) 3672 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2); 3673 else 3674 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); 3675 3676 control |= ib->length_dw | (vmid << 24); 3677 3678 if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { 3679 control |= INDIRECT_BUFFER_PRE_ENB(1); 3680 3681 if (!(ib->flags & AMDGPU_IB_FLAG_CE)) 3682 gfx_v9_0_ring_emit_de_meta(ring); 3683 } 3684 3685 amdgpu_ring_write(ring, header); 3686 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 3687 amdgpu_ring_write(ring, 3688 #ifdef __BIG_ENDIAN 3689 (2 << 0) | 3690 #endif 3691 lower_32_bits(ib->gpu_addr)); 3692 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 3693 amdgpu_ring_write(ring, control); 3694 } 3695 3696 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring, 3697 struct amdgpu_ib *ib, 3698 unsigned vmid, bool ctx_switch) 3699 { 3700 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); 3701 3702 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 3703 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 3704 amdgpu_ring_write(ring, 3705 #ifdef __BIG_ENDIAN 3706 (2 << 0) | 3707 #endif 3708 lower_32_bits(ib->gpu_addr)); 3709 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 3710 amdgpu_ring_write(ring, control); 3711 } 3712 3713 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, 3714 u64 seq, unsigned flags) 3715 { 3716 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 3717 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; 3718 3719 /* RELEASE_MEM - flush caches, send int */ 3720 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); 3721 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | 3722 EOP_TC_ACTION_EN | 3723 EOP_TC_WB_ACTION_EN | 3724 EOP_TC_MD_ACTION_EN | 3725 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 3726 EVENT_INDEX(5))); 3727 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 
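/* 2 = send the EOP interrupt only after the write has been confirmed */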
2 : 0)); 3728 3729 /* 3730 * the address should be Qword aligned for a 64-bit write, and Dword 3731 * aligned when only the low 32 bits are sent (the high half is discarded) 3732 */ 3733 if (write64bit) 3734 BUG_ON(addr & 0x7); 3735 else 3736 BUG_ON(addr & 0x3); 3737 amdgpu_ring_write(ring, lower_32_bits(addr)); 3738 amdgpu_ring_write(ring, upper_32_bits(addr)); 3739 amdgpu_ring_write(ring, lower_32_bits(seq)); 3740 amdgpu_ring_write(ring, upper_32_bits(seq)); 3741 amdgpu_ring_write(ring, 0); 3742 } 3743 3744 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) 3745 { 3746 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 3747 uint32_t seq = ring->fence_drv.sync_seq; 3748 uint64_t addr = ring->fence_drv.gpu_addr; 3749 3750 gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0, 3751 lower_32_bits(addr), upper_32_bits(addr), 3752 seq, 0xffffffff, 4); 3753 } 3754 3755 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 3756 unsigned vmid, uint64_t pd_addr) 3757 { 3758 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 3759 3760 /* compute doesn't have PFP */ 3761 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) { 3762 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 3763 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 3764 amdgpu_ring_write(ring, 0x0); 3765 } 3766 } 3767 3768 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring) 3769 { 3770 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */ 3771 } 3772 3773 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring) 3774 { 3775 u64 wptr; 3776 3777 /* XXX check if swapping is necessary on BE */ 3778 if (ring->use_doorbell) 3779 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]); 3780 else 3781 BUG(); 3782 return wptr; 3783 } 3784 3785 static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring, 3786 bool acquire) 3787 { 3788 struct amdgpu_device *adev = ring->adev; 3789 int pipe_num, tmp, reg; 3790 int pipe_percent = acquire ?
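/* full wave-launch percentage when reserving the pipe, the minimum otherwise */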
SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1; 3791 3792 pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe; 3793 3794 /* first me only has 2 entries, GFX and HP3D */ 3795 if (ring->me > 0) 3796 pipe_num -= 2; 3797 3798 reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num; 3799 tmp = RREG32(reg); 3800 tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent); 3801 WREG32(reg, tmp); 3802 } 3803 3804 static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev, 3805 struct amdgpu_ring *ring, 3806 bool acquire) 3807 { 3808 int i, pipe; 3809 bool reserve; 3810 struct amdgpu_ring *iring; 3811 3812 mutex_lock(&adev->gfx.pipe_reserve_mutex); 3813 pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0); 3814 if (acquire) 3815 set_bit(pipe, adev->gfx.pipe_reserve_bitmap); 3816 else 3817 clear_bit(pipe, adev->gfx.pipe_reserve_bitmap); 3818 3819 if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) { 3820 /* Clear all reservations - everyone reacquires all resources */ 3821 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) 3822 gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i], 3823 true); 3824 3825 for (i = 0; i < adev->gfx.num_compute_rings; ++i) 3826 gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i], 3827 true); 3828 } else { 3829 /* Lower all pipes without a current reservation */ 3830 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { 3831 iring = &adev->gfx.gfx_ring[i]; 3832 pipe = amdgpu_gfx_queue_to_bit(adev, 3833 iring->me, 3834 iring->pipe, 3835 0); 3836 reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); 3837 gfx_v9_0_ring_set_pipe_percent(iring, reserve); 3838 } 3839 3840 for (i = 0; i < adev->gfx.num_compute_rings; ++i) { 3841 iring = &adev->gfx.compute_ring[i]; 3842 pipe = amdgpu_gfx_queue_to_bit(adev, 3843 iring->me, 3844 iring->pipe, 3845 0); 3846 reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); 3847 gfx_v9_0_ring_set_pipe_percent(iring, reserve); 3848 } 3849 } 3850 3851 mutex_unlock(&adev->gfx.pipe_reserve_mutex); 3852 } 3853 3854 static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev, 3855 struct amdgpu_ring *ring, 3856 bool acquire) 3857 { 3858 uint32_t pipe_priority = acquire ? 0x2 : 0x0; 3859 uint32_t queue_priority = acquire ? 
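/* 0xf is the highest queue priority the HQD accepts */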
0xf : 0x0; 3860 3861 mutex_lock(&adev->srbm_mutex); 3862 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3863 3864 WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority); 3865 WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority); 3866 3867 soc15_grbm_select(adev, 0, 0, 0, 0); 3868 mutex_unlock(&adev->srbm_mutex); 3869 } 3870 3871 static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring, 3872 enum drm_sched_priority priority) 3873 { 3874 struct amdgpu_device *adev = ring->adev; 3875 bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW; 3876 3877 if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) 3878 return; 3879 3880 gfx_v9_0_hqd_set_priority(adev, ring, acquire); 3881 gfx_v9_0_pipe_reserve_resources(adev, ring, acquire); 3882 } 3883 3884 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring) 3885 { 3886 struct amdgpu_device *adev = ring->adev; 3887 3888 /* XXX check if swapping is necessary on BE */ 3889 if (ring->use_doorbell) { 3890 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr); 3891 WDOORBELL64(ring->doorbell_index, ring->wptr); 3892 } else { 3893 BUG(); /* only DOORBELL method supported on gfx9 now */ 3894 } 3895 } 3896 3897 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, 3898 u64 seq, unsigned int flags) 3899 { 3900 struct amdgpu_device *adev = ring->adev; 3901 3902 /* we only allocate 32bit for each seq wb address */ 3903 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); 3904 3905 /* write fence seq to the "addr" */ 3906 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3907 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 3908 WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); 3909 amdgpu_ring_write(ring, lower_32_bits(addr)); 3910 amdgpu_ring_write(ring, upper_32_bits(addr)); 3911 amdgpu_ring_write(ring, lower_32_bits(seq)); 3912 3913 if (flags & AMDGPU_FENCE_FLAG_INT) { 3914 /* set register to trigger INT */ 3915 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3916 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 3917 WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); 3918 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS)); 3919 amdgpu_ring_write(ring, 0); 3920 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ 3921 } 3922 } 3923 3924 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring) 3925 { 3926 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 3927 amdgpu_ring_write(ring, 0); 3928 } 3929 3930 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring) 3931 { 3932 struct v9_ce_ib_state ce_payload = {0}; 3933 uint64_t csa_addr; 3934 int cnt; 3935 3936 cnt = (sizeof(ce_payload) >> 2) + 4 - 2; 3937 csa_addr = amdgpu_csa_vaddr(ring->adev); 3938 3939 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); 3940 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | 3941 WRITE_DATA_DST_SEL(8) | 3942 WR_CONFIRM) | 3943 WRITE_DATA_CACHE_POLICY(0)); 3944 amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload))); 3945 amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload))); 3946 amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2); 3947 } 3948 3949 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring) 3950 { 3951 struct v9_de_ib_state de_payload = {0}; 3952 uint64_t csa_addr, gds_addr; 3953 int cnt; 3954 3955 csa_addr = amdgpu_csa_vaddr(ring->adev); 3956 gds_addr = csa_addr + 4096; 3957 de_payload.gds_backup_addrlo =
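/* the GDS backup area starts one 4 KiB page past the CSA base, per gds_addr above */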
lower_32_bits(gds_addr); 3958 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); 3959 3960 cnt = (sizeof(de_payload) >> 2) + 4 - 2; 3961 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); 3962 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | 3963 WRITE_DATA_DST_SEL(8) | 3964 WR_CONFIRM) | 3965 WRITE_DATA_CACHE_POLICY(0)); 3966 amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload))); 3967 amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload))); 3968 amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2); 3969 } 3970 3971 static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start) 3972 { 3973 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); 3974 amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */ 3975 } 3976 3977 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) 3978 { 3979 uint32_t dw2 = 0; 3980 3981 if (amdgpu_sriov_vf(ring->adev)) 3982 gfx_v9_0_ring_emit_ce_meta(ring); 3983 3984 gfx_v9_0_ring_emit_tmz(ring, true); 3985 3986 dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */ 3987 if (flags & AMDGPU_HAVE_CTX_SWITCH) { 3988 /* set load_global_config & load_global_uconfig */ 3989 dw2 |= 0x8001; 3990 /* set load_cs_sh_regs */ 3991 dw2 |= 0x01000000; 3992 /* set load_per_context_state & load_gfx_sh_regs for GFX */ 3993 dw2 |= 0x10002; 3994 3995 /* set load_ce_ram if preamble presented */ 3996 if (AMDGPU_PREAMBLE_IB_PRESENT & flags) 3997 dw2 |= 0x10000000; 3998 } else { 3999 /* still load_ce_ram if this is the first time the preamble is presented, 4000 * even though no context switch happens. 4001 */ 4002 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags) 4003 dw2 |= 0x10000000; 4004 } 4005 4006 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 4007 amdgpu_ring_write(ring, dw2); 4008 amdgpu_ring_write(ring, 0); 4009 } 4010 4011 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring) 4012 { 4013 unsigned ret; 4014 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3)); 4015 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr)); 4016 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr)); 4017 amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */ 4018 ret = ring->wptr & ring->buf_mask; 4019 amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */ 4020 return ret; 4021 } 4022 4023 static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset) 4024 { 4025 unsigned cur; 4026 BUG_ON(offset > ring->buf_mask); 4027 BUG_ON(ring->ring[offset] != 0x55aa55aa); 4028 4029 cur = (ring->wptr & ring->buf_mask) - 1; 4030 if (likely(cur > offset)) 4031 ring->ring[offset] = cur - offset; 4032 else 4033 ring->ring[offset] = (ring->ring_size>>2) - offset + cur; 4034 } 4035 4036 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) 4037 { 4038 struct amdgpu_device *adev = ring->adev; 4039 4040 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 4041 amdgpu_ring_write(ring, 0 | /* src: register */ 4042 (5 << 8) | /* dst: memory */ 4043 (1 << 20)); /* write confirm */ 4044 amdgpu_ring_write(ring, reg); 4045 amdgpu_ring_write(ring, 0); 4046 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 4047 adev->virt.reg_val_offs * 4)); 4048 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 4049 adev->virt.reg_val_offs * 4)); 4050 } 4051 4052 static void
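/* emit a WRITE_DATA packet that programs a single register from the ring; the engine-select and write-confirm bits chosen below depend on the ring type */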
gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 4053 uint32_t val) 4054 { 4055 uint32_t cmd = 0; 4056 4057 switch (ring->funcs->type) { 4058 case AMDGPU_RING_TYPE_GFX: 4059 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; 4060 break; 4061 case AMDGPU_RING_TYPE_KIQ: 4062 cmd = (1 << 16); /* no inc addr */ 4063 break; 4064 default: 4065 cmd = WR_CONFIRM; 4066 break; 4067 } 4068 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4069 amdgpu_ring_write(ring, cmd); 4070 amdgpu_ring_write(ring, reg); 4071 amdgpu_ring_write(ring, 0); 4072 amdgpu_ring_write(ring, val); 4073 } 4074 4075 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 4076 uint32_t val, uint32_t mask) 4077 { 4078 gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); 4079 } 4080 4081 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, 4082 enum amdgpu_interrupt_state state) 4083 { 4084 switch (state) { 4085 case AMDGPU_IRQ_STATE_DISABLE: 4086 case AMDGPU_IRQ_STATE_ENABLE: 4087 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, 4088 TIME_STAMP_INT_ENABLE, 4089 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 4090 break; 4091 default: 4092 break; 4093 } 4094 } 4095 4096 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, 4097 int me, int pipe, 4098 enum amdgpu_interrupt_state state) 4099 { 4100 u32 mec_int_cntl, mec_int_cntl_reg; 4101 4102 /* 4103 * amdgpu controls only the first MEC. That's why this function only 4104 * handles the setting of interrupts for this specific MEC. All other 4105 * pipes' interrupts are set by amdkfd. 4106 */ 4107 4108 if (me == 1) { 4109 switch (pipe) { 4110 case 0: 4111 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL); 4112 break; 4113 case 1: 4114 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL); 4115 break; 4116 case 2: 4117 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL); 4118 break; 4119 case 3: 4120 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL); 4121 break; 4122 default: 4123 DRM_DEBUG("invalid pipe %d\n", pipe); 4124 return; 4125 } 4126 } else { 4127 DRM_DEBUG("invalid me %d\n", me); 4128 return; 4129 } 4130 4131 switch (state) { 4132 case AMDGPU_IRQ_STATE_DISABLE: 4133 mec_int_cntl = RREG32(mec_int_cntl_reg); 4134 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4135 TIME_STAMP_INT_ENABLE, 0); 4136 WREG32(mec_int_cntl_reg, mec_int_cntl); 4137 break; 4138 case AMDGPU_IRQ_STATE_ENABLE: 4139 mec_int_cntl = RREG32(mec_int_cntl_reg); 4140 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4141 TIME_STAMP_INT_ENABLE, 1); 4142 WREG32(mec_int_cntl_reg, mec_int_cntl); 4143 break; 4144 default: 4145 break; 4146 } 4147 } 4148 4149 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev, 4150 struct amdgpu_irq_src *source, 4151 unsigned type, 4152 enum amdgpu_interrupt_state state) 4153 { 4154 switch (state) { 4155 case AMDGPU_IRQ_STATE_DISABLE: 4156 case AMDGPU_IRQ_STATE_ENABLE: 4157 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, 4158 PRIV_REG_INT_ENABLE, 4159 state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0); 4160 break; 4161 default: 4162 break; 4163 } 4164 4165 return 0; 4166 } 4167 4168 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev, 4169 struct amdgpu_irq_src *source, 4170 unsigned type, 4171 enum amdgpu_interrupt_state state) 4172 { 4173 switch (state) { 4174 case AMDGPU_IRQ_STATE_DISABLE: 4175 case AMDGPU_IRQ_STATE_ENABLE: 4176 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, 4177 PRIV_INSTR_INT_ENABLE, 4178 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); break; 4179 default: 4180 break; 4181 } 4182 4183 return 0; 4184 } 4185 4186 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev, 4187 struct amdgpu_irq_src *src, 4188 unsigned type, 4189 enum amdgpu_interrupt_state state) 4190 { 4191 switch (type) { 4192 case AMDGPU_CP_IRQ_GFX_EOP: 4193 gfx_v9_0_set_gfx_eop_interrupt_state(adev, state); 4194 break; 4195 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: 4196 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state); 4197 break; 4198 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: 4199 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state); 4200 break; 4201 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: 4202 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state); 4203 break; 4204 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: 4205 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state); 4206 break; 4207 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP: 4208 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state); 4209 break; 4210 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP: 4211 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state); 4212 break; 4213 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP: 4214 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state); 4215 break; 4216 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP: 4217 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state); 4218 break; 4219 default: 4220 break; 4221 } 4222 return 0; 4223 } 4224 4225 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev, 4226 struct amdgpu_irq_src *source, 4227 struct amdgpu_iv_entry *entry) 4228 { 4229 int i; 4230 u8 me_id, pipe_id, queue_id; 4231 struct amdgpu_ring *ring; 4232 4233 DRM_DEBUG("IH: CP EOP\n"); 4234 me_id = (entry->ring_id & 0x0c) >> 2; 4235 pipe_id = (entry->ring_id & 0x03) >> 0; 4236 queue_id = (entry->ring_id & 0x70) >> 4; 4237 4238 switch (me_id) { 4239 case 0: 4240 amdgpu_fence_process(&adev->gfx.gfx_ring[0]); 4241 break; 4242 case 1: 4243 case 2: 4244 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4245 ring = &adev->gfx.compute_ring[i]; 4246 /* Per-queue interrupt is supported for MEC starting from VI. 4247 * The interrupt can only be enabled/disabled per pipe instead of per queue.
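* The handler below therefore matches the decoded me/pipe/queue against every compute ring before signalling a fence.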
4248 */ 4249 if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id)) 4250 amdgpu_fence_process(ring); 4251 } 4252 break; 4253 } 4254 return 0; 4255 } 4256 4257 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev, 4258 struct amdgpu_irq_src *source, 4259 struct amdgpu_iv_entry *entry) 4260 { 4261 DRM_ERROR("Illegal register access in command stream\n"); 4262 schedule_work(&adev->reset_work); 4263 return 0; 4264 } 4265 4266 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev, 4267 struct amdgpu_irq_src *source, 4268 struct amdgpu_iv_entry *entry) 4269 { 4270 DRM_ERROR("Illegal instruction in command stream\n"); 4271 schedule_work(&adev->reset_work); 4272 return 0; 4273 } 4274 4275 static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev, 4276 struct amdgpu_irq_src *src, 4277 unsigned int type, 4278 enum amdgpu_interrupt_state state) 4279 { 4280 uint32_t tmp, target; 4281 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); 4282 4283 if (ring->me == 1) 4284 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL); 4285 else 4286 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL); 4287 target += ring->pipe; 4288 4289 switch (type) { 4290 case AMDGPU_CP_KIQ_IRQ_DRIVER0: 4291 if (state == AMDGPU_IRQ_STATE_DISABLE) { 4292 tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL); 4293 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, 4294 GENERIC2_INT_ENABLE, 0); 4295 WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp); 4296 4297 tmp = RREG32(target); 4298 tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL, 4299 GENERIC2_INT_ENABLE, 0); 4300 WREG32(target, tmp); 4301 } else { 4302 tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL); 4303 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, 4304 GENERIC2_INT_ENABLE, 1); 4305 WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp); 4306 4307 tmp = RREG32(target); 4308 tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL, 4309 GENERIC2_INT_ENABLE, 1); 4310 WREG32(target, tmp); 4311 } 4312 break; 4313 default: 4314 BUG(); /* kiq only support GENERIC2_INT now */ 4315 break; 4316 } 4317 return 0; 4318 } 4319 4320 static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev, 4321 struct amdgpu_irq_src *source, 4322 struct amdgpu_iv_entry *entry) 4323 { 4324 u8 me_id, pipe_id, queue_id; 4325 struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); 4326 4327 me_id = (entry->ring_id & 0x0c) >> 2; 4328 pipe_id = (entry->ring_id & 0x03) >> 0; 4329 queue_id = (entry->ring_id & 0x70) >> 4; 4330 DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n", 4331 me_id, pipe_id, queue_id); 4332 4333 amdgpu_fence_process(ring); 4334 return 0; 4335 } 4336 4337 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = { 4338 .name = "gfx_v9_0", 4339 .early_init = gfx_v9_0_early_init, 4340 .late_init = gfx_v9_0_late_init, 4341 .sw_init = gfx_v9_0_sw_init, 4342 .sw_fini = gfx_v9_0_sw_fini, 4343 .hw_init = gfx_v9_0_hw_init, 4344 .hw_fini = gfx_v9_0_hw_fini, 4345 .suspend = gfx_v9_0_suspend, 4346 .resume = gfx_v9_0_resume, 4347 .is_idle = gfx_v9_0_is_idle, 4348 .wait_for_idle = gfx_v9_0_wait_for_idle, 4349 .soft_reset = gfx_v9_0_soft_reset, 4350 .set_clockgating_state = gfx_v9_0_set_clockgating_state, 4351 .set_powergating_state = gfx_v9_0_set_powergating_state, 4352 .get_clockgating_state = gfx_v9_0_get_clockgating_state, 4353 }; 4354 4355 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = { 4356 .type = AMDGPU_RING_TYPE_GFX, 4357 .align_mask = 0xff, 4358 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4359 .support_64bit_ptrs = true, 4360 .vmhub = AMDGPU_GFXHUB, 4361 .get_rptr = gfx_v9_0_ring_get_rptr_gfx, 
4362 .get_wptr = gfx_v9_0_ring_get_wptr_gfx, 4363 .set_wptr = gfx_v9_0_ring_set_wptr_gfx, 4364 .emit_frame_size = /* totally 242 maximum if 16 IBs */ 4365 5 + /* COND_EXEC */ 4366 7 + /* PIPELINE_SYNC */ 4367 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4368 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4369 2 + /* VM_FLUSH */ 4370 8 + /* FENCE for VM_FLUSH */ 4371 20 + /* GDS switch */ 4372 4 + /* double SWITCH_BUFFER, 4373 the first COND_EXEC jump to the place just 4374 prior to this double SWITCH_BUFFER */ 4375 5 + /* COND_EXEC */ 4376 7 + /* HDP_flush */ 4377 4 + /* VGT_flush */ 4378 14 + /* CE_META */ 4379 31 + /* DE_META */ 4380 3 + /* CNTX_CTRL */ 4381 5 + /* HDP_INVL */ 4382 8 + 8 + /* FENCE x2 */ 4383 2, /* SWITCH_BUFFER */ 4384 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */ 4385 .emit_ib = gfx_v9_0_ring_emit_ib_gfx, 4386 .emit_fence = gfx_v9_0_ring_emit_fence, 4387 .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync, 4388 .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush, 4389 .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch, 4390 .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush, 4391 .test_ring = gfx_v9_0_ring_test_ring, 4392 .test_ib = gfx_v9_0_ring_test_ib, 4393 .insert_nop = amdgpu_ring_insert_nop, 4394 .pad_ib = amdgpu_ring_generic_pad_ib, 4395 .emit_switch_buffer = gfx_v9_ring_emit_sb, 4396 .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl, 4397 .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec, 4398 .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec, 4399 .emit_tmz = gfx_v9_0_ring_emit_tmz, 4400 .emit_wreg = gfx_v9_0_ring_emit_wreg, 4401 .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, 4402 }; 4403 4404 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { 4405 .type = AMDGPU_RING_TYPE_COMPUTE, 4406 .align_mask = 0xff, 4407 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4408 .support_64bit_ptrs = true, 4409 .vmhub = AMDGPU_GFXHUB, 4410 .get_rptr = gfx_v9_0_ring_get_rptr_compute, 4411 .get_wptr = gfx_v9_0_ring_get_wptr_compute, 4412 .set_wptr = gfx_v9_0_ring_set_wptr_compute, 4413 .emit_frame_size = 4414 20 + /* gfx_v9_0_ring_emit_gds_switch */ 4415 7 + /* gfx_v9_0_ring_emit_hdp_flush */ 4416 5 + /* hdp invalidate */ 4417 7 + /* gfx_v9_0_ring_emit_pipeline_sync */ 4418 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4419 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4420 2 + /* gfx_v9_0_ring_emit_vm_flush */ 4421 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */ 4422 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */ 4423 .emit_ib = gfx_v9_0_ring_emit_ib_compute, 4424 .emit_fence = gfx_v9_0_ring_emit_fence, 4425 .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync, 4426 .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush, 4427 .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch, 4428 .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush, 4429 .test_ring = gfx_v9_0_ring_test_ring, 4430 .test_ib = gfx_v9_0_ring_test_ib, 4431 .insert_nop = amdgpu_ring_insert_nop, 4432 .pad_ib = amdgpu_ring_generic_pad_ib, 4433 .set_priority = gfx_v9_0_ring_set_priority_compute, 4434 .emit_wreg = gfx_v9_0_ring_emit_wreg, 4435 .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, 4436 }; 4437 4438 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { 4439 .type = AMDGPU_RING_TYPE_KIQ, 4440 .align_mask = 0xff, 4441 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4442 .support_64bit_ptrs = true, 4443 .vmhub = AMDGPU_GFXHUB, 4444 .get_rptr = gfx_v9_0_ring_get_rptr_compute, 4445 .get_wptr = gfx_v9_0_ring_get_wptr_compute, 4446 .set_wptr = gfx_v9_0_ring_set_wptr_compute, 4447 .emit_frame_size = 4448 20 + /* 
gfx_v9_0_ring_emit_gds_switch */ 4449 7 + /* gfx_v9_0_ring_emit_hdp_flush */ 4450 5 + /* hdp invalidate */ 4451 7 + /* gfx_v9_0_ring_emit_pipeline_sync */ 4452 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4453 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4454 2 + /* gfx_v9_0_ring_emit_vm_flush */ 4455 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */ 4456 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */ 4457 .emit_ib = gfx_v9_0_ring_emit_ib_compute, 4458 .emit_fence = gfx_v9_0_ring_emit_fence_kiq, 4459 .test_ring = gfx_v9_0_ring_test_ring, 4460 .test_ib = gfx_v9_0_ring_test_ib, 4461 .insert_nop = amdgpu_ring_insert_nop, 4462 .pad_ib = amdgpu_ring_generic_pad_ib, 4463 .emit_rreg = gfx_v9_0_ring_emit_rreg, 4464 .emit_wreg = gfx_v9_0_ring_emit_wreg, 4465 .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, 4466 }; 4467 4468 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev) 4469 { 4470 int i; 4471 4472 adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq; 4473 4474 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 4475 adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx; 4476 4477 for (i = 0; i < adev->gfx.num_compute_rings; i++) 4478 adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute; 4479 } 4480 4481 static const struct amdgpu_irq_src_funcs gfx_v9_0_kiq_irq_funcs = { 4482 .set = gfx_v9_0_kiq_set_interrupt_state, 4483 .process = gfx_v9_0_kiq_irq, 4484 }; 4485 4486 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = { 4487 .set = gfx_v9_0_set_eop_interrupt_state, 4488 .process = gfx_v9_0_eop_irq, 4489 }; 4490 4491 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = { 4492 .set = gfx_v9_0_set_priv_reg_fault_state, 4493 .process = gfx_v9_0_priv_reg_irq, 4494 }; 4495 4496 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = { 4497 .set = gfx_v9_0_set_priv_inst_fault_state, 4498 .process = gfx_v9_0_priv_inst_irq, 4499 }; 4500 4501 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev) 4502 { 4503 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; 4504 adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs; 4505 4506 adev->gfx.priv_reg_irq.num_types = 1; 4507 adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs; 4508 4509 adev->gfx.priv_inst_irq.num_types = 1; 4510 adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs; 4511 4512 adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST; 4513 adev->gfx.kiq.irq.funcs = &gfx_v9_0_kiq_irq_funcs; 4514 } 4515 4516 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev) 4517 { 4518 switch (adev->asic_type) { 4519 case CHIP_VEGA10: 4520 case CHIP_VEGA12: 4521 case CHIP_RAVEN: 4522 adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs; 4523 break; 4524 default: 4525 break; 4526 } 4527 } 4528 4529 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev) 4530 { 4531 /* init asic gds info */ 4532 adev->gds.mem.total_size = RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE); 4533 adev->gds.gws.total_size = 64; 4534 adev->gds.oa.total_size = 16; 4535 4536 if (adev->gds.mem.total_size == 64 * 1024) { 4537 adev->gds.mem.gfx_partition_size = 4096; 4538 adev->gds.mem.cs_partition_size = 4096; 4539 4540 adev->gds.gws.gfx_partition_size = 4; 4541 adev->gds.gws.cs_partition_size = 4; 4542 4543 adev->gds.oa.gfx_partition_size = 4; 4544 adev->gds.oa.cs_partition_size = 1; 4545 } else { 4546 adev->gds.mem.gfx_partition_size = 1024; 4547 adev->gds.mem.cs_partition_size = 1024; 4548 4549 adev->gds.gws.gfx_partition_size = 16; 4550 adev->gds.gws.cs_partition_size = 16; 4551 4552
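/* on parts without the 64 KiB GDS layout above, OA is split evenly between gfx and compute */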
adev->gds.oa.gfx_partition_size = 4; 4553 adev->gds.oa.cs_partition_size = 4; 4554 } 4555 } 4556 4557 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, 4558 u32 bitmap) 4559 { 4560 u32 data; 4561 4562 if (!bitmap) 4563 return; 4564 4565 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; 4566 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; 4567 4568 WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data); 4569 } 4570 4571 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev) 4572 { 4573 u32 data, mask; 4574 4575 data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG); 4576 data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG); 4577 4578 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; 4579 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; 4580 4581 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh); 4582 4583 return (~data) & mask; 4584 } 4585 4586 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, 4587 struct amdgpu_cu_info *cu_info) 4588 { 4589 int i, j, k, counter, active_cu_number = 0; 4590 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; 4591 unsigned disable_masks[4 * 2]; 4592 4593 if (!adev || !cu_info) 4594 return -EINVAL; 4595 4596 amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2); 4597 4598 mutex_lock(&adev->grbm_idx_mutex); 4599 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 4600 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 4601 mask = 1; 4602 ao_bitmap = 0; 4603 counter = 0; 4604 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff); 4605 if (i < 4 && j < 2) 4606 gfx_v9_0_set_user_cu_inactive_bitmap( 4607 adev, disable_masks[i * 2 + j]); 4608 bitmap = gfx_v9_0_get_cu_active_bitmap(adev); 4609 cu_info->bitmap[i][j] = bitmap; 4610 4611 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { 4612 if (bitmap & mask) { 4613 if (counter < adev->gfx.config.max_cu_per_sh) 4614 ao_bitmap |= mask; 4615 counter ++; 4616 } 4617 mask <<= 1; 4618 } 4619 active_cu_number += counter; 4620 if (i < 2 && j < 2) 4621 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); 4622 cu_info->ao_cu_bitmap[i][j] = ao_bitmap; 4623 } 4624 } 4625 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 4626 mutex_unlock(&adev->grbm_idx_mutex); 4627 4628 cu_info->number = active_cu_number; 4629 cu_info->ao_cu_mask = ao_cu_mask; 4630 4631 return 0; 4632 } 4633 4634 const struct amdgpu_ip_block_version gfx_v9_0_ip_block = 4635 { 4636 .type = AMD_IP_BLOCK_TYPE_GFX, 4637 .major = 9, 4638 .minor = 0, 4639 .rev = 0, 4640 .funcs = &gfx_v9_0_ip_funcs, 4641 }; 4642
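/*
 * Consumers register this block when assembling the IP list for a
 * Vega10/Vega12/Raven part; a minimal sketch, matching the pattern used by
 * the SoC15 setup code:
 *
 *	amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
 *
 * after which the gfx_v9_0_ip_funcs callbacks above are driven through the
 * usual early_init/sw_init/hw_init/late_init bring-up sequence.
 */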