1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 #include <linux/kernel.h> 24 #include <linux/firmware.h> 25 #include <drm/drmP.h> 26 #include "amdgpu.h" 27 #include "amdgpu_gfx.h" 28 #include "soc15.h" 29 #include "soc15d.h" 30 #include "amdgpu_atomfirmware.h" 31 #include "amdgpu_pm.h" 32 33 #include "gc/gc_9_0_offset.h" 34 #include "gc/gc_9_0_sh_mask.h" 35 #include "vega10_enum.h" 36 #include "hdp/hdp_4_0_offset.h" 37 38 #include "soc15_common.h" 39 #include "clearstate_gfx9.h" 40 #include "v9_structs.h" 41 42 #include "ivsrcid/gfx/irqsrcs_gfx_9_0.h" 43 44 #include "amdgpu_ras.h" 45 46 #define GFX9_NUM_GFX_RINGS 1 47 #define GFX9_MEC_HPD_SIZE 4096 48 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L 49 #define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L 50 51 #define mmPWR_MISC_CNTL_STATUS 0x0183 52 #define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0 53 #define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0 54 #define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1 55 #define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L 56 #define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L 57 58 MODULE_FIRMWARE("amdgpu/vega10_ce.bin"); 59 MODULE_FIRMWARE("amdgpu/vega10_pfp.bin"); 60 MODULE_FIRMWARE("amdgpu/vega10_me.bin"); 61 MODULE_FIRMWARE("amdgpu/vega10_mec.bin"); 62 MODULE_FIRMWARE("amdgpu/vega10_mec2.bin"); 63 MODULE_FIRMWARE("amdgpu/vega10_rlc.bin"); 64 65 MODULE_FIRMWARE("amdgpu/vega12_ce.bin"); 66 MODULE_FIRMWARE("amdgpu/vega12_pfp.bin"); 67 MODULE_FIRMWARE("amdgpu/vega12_me.bin"); 68 MODULE_FIRMWARE("amdgpu/vega12_mec.bin"); 69 MODULE_FIRMWARE("amdgpu/vega12_mec2.bin"); 70 MODULE_FIRMWARE("amdgpu/vega12_rlc.bin"); 71 72 MODULE_FIRMWARE("amdgpu/vega20_ce.bin"); 73 MODULE_FIRMWARE("amdgpu/vega20_pfp.bin"); 74 MODULE_FIRMWARE("amdgpu/vega20_me.bin"); 75 MODULE_FIRMWARE("amdgpu/vega20_mec.bin"); 76 MODULE_FIRMWARE("amdgpu/vega20_mec2.bin"); 77 MODULE_FIRMWARE("amdgpu/vega20_rlc.bin"); 78 79 MODULE_FIRMWARE("amdgpu/raven_ce.bin"); 80 MODULE_FIRMWARE("amdgpu/raven_pfp.bin"); 81 MODULE_FIRMWARE("amdgpu/raven_me.bin"); 82 MODULE_FIRMWARE("amdgpu/raven_mec.bin"); 83 MODULE_FIRMWARE("amdgpu/raven_mec2.bin"); 84 MODULE_FIRMWARE("amdgpu/raven_rlc.bin"); 85 86 MODULE_FIRMWARE("amdgpu/picasso_ce.bin"); 87 MODULE_FIRMWARE("amdgpu/picasso_pfp.bin"); 88 MODULE_FIRMWARE("amdgpu/picasso_me.bin"); 89 MODULE_FIRMWARE("amdgpu/picasso_mec.bin"); 90 MODULE_FIRMWARE("amdgpu/picasso_mec2.bin"); 91 MODULE_FIRMWARE("amdgpu/picasso_rlc.bin"); 92 MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin"); 93 94 MODULE_FIRMWARE("amdgpu/raven2_ce.bin"); 95 MODULE_FIRMWARE("amdgpu/raven2_pfp.bin"); 96 MODULE_FIRMWARE("amdgpu/raven2_me.bin"); 97 MODULE_FIRMWARE("amdgpu/raven2_mec.bin"); 98 MODULE_FIRMWARE("amdgpu/raven2_mec2.bin"); 99 MODULE_FIRMWARE("amdgpu/raven2_rlc.bin"); 100 MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin"); 101 102 static const struct soc15_reg_golden golden_settings_gc_9_0[] = 103 { 104 SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400), 105 SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000), 106 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000), 107 SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024), 108 SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001), 109 SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), 110 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000), 111 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800), 112 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800), 113 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87), 114 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f), 115 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000), 116 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000), 117 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68), 118 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197), 119 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), 120 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff), 121 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), 122 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), 123 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) 124 }; 125 126 static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = 127 { 128 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107), 129 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000), 130 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080), 131 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080), 132 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080), 133 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042), 134 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042), 135 SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080), 136 SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000), 137 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080), 138 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080), 139 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080), 140 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080), 141 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080), 142 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000), 143 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107), 144 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800), 145 SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080) 146 }; 147 148 static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] = 149 { 150 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080), 151 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000), 152 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000), 153 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042), 154 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042), 155 SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400), 156 SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000), 157 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000), 158 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107), 159 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000), 160 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000) 161 }; 162 163 static const struct soc15_reg_golden golden_settings_gc_9_1[] = 164 { 165 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104), 166 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080), 167 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080), 168 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080), 169 SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420), 170 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000), 171 SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080), 172 SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024), 173 SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001), 174 SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), 175 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080), 176 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080), 177 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080), 178 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080), 179 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080), 180 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000), 181 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000), 182 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120), 183 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), 184 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff), 185 SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080), 186 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), 187 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), 188 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) 189 }; 190 191 static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] = 192 { 193 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000), 194 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042), 195 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042), 196 SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000), 197 SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000), 198 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000), 199 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800) 200 }; 201 202 static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] = 203 { 204 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000), 205 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104), 206 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000), 207 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080), 208 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080), 209 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080), 210 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041), 211 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041), 212 SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080), 213 SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000), 214 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080), 215 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080), 216 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080), 217 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080), 218 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080), 219 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000), 220 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010), 221 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000), 222 SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080), 223 }; 224 225 static const struct soc15_reg_golden golden_settings_gc_9_x_common[] = 226 { 227 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff), 228 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000), 229 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382) 230 }; 231 232 static const struct soc15_reg_golden golden_settings_gc_9_2_1[] = 233 { 234 SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420), 235 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000), 236 SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024), 237 SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001), 238 SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), 239 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000), 240 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800), 241 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800), 242 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87), 243 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f), 244 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000), 245 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000), 246 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68), 247 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197), 248 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), 249 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff) 250 }; 251 252 static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] = 253 { 254 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080), 255 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104), 256 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000), 257 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041), 258 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041), 259 SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000), 260 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107), 261 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000), 262 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410), 263 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000), 264 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), 265 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), 266 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) 267 }; 268 269 static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] = 270 { 271 mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0, 272 mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0, 273 mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0, 274 mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0, 275 mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0, 276 mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0, 277 mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0, 278 mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0, 279 }; 280 281 static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] = 282 { 283 mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0, 284 mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0, 285 mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0, 286 mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0, 287 mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0, 288 mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0, 289 mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0, 290 mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0, 291 }; 292 293 #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042 294 #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041 295 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042 296 #define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041 297 298 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev); 299 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev); 300 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev); 301 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev); 302 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, 303 struct amdgpu_cu_info *cu_info); 304 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev); 305 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance); 306 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring); 307 308 static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) 309 { 310 switch (adev->asic_type) { 311 case CHIP_VEGA10: 312 soc15_program_register_sequence(adev, 313 golden_settings_gc_9_0, 314 ARRAY_SIZE(golden_settings_gc_9_0)); 315 soc15_program_register_sequence(adev, 316 golden_settings_gc_9_0_vg10, 317 ARRAY_SIZE(golden_settings_gc_9_0_vg10)); 318 break; 319 case CHIP_VEGA12: 320 soc15_program_register_sequence(adev, 321 golden_settings_gc_9_2_1, 322 ARRAY_SIZE(golden_settings_gc_9_2_1)); 323 soc15_program_register_sequence(adev, 324 golden_settings_gc_9_2_1_vg12, 325 ARRAY_SIZE(golden_settings_gc_9_2_1_vg12)); 326 break; 327 case CHIP_VEGA20: 328 soc15_program_register_sequence(adev, 329 golden_settings_gc_9_0, 330 ARRAY_SIZE(golden_settings_gc_9_0)); 331 soc15_program_register_sequence(adev, 332 golden_settings_gc_9_0_vg20, 333 ARRAY_SIZE(golden_settings_gc_9_0_vg20)); 334 break; 335 case CHIP_RAVEN: 336 soc15_program_register_sequence(adev, golden_settings_gc_9_1, 337 ARRAY_SIZE(golden_settings_gc_9_1)); 338 if (adev->rev_id >= 8) 339 soc15_program_register_sequence(adev, 340 golden_settings_gc_9_1_rv2, 341 ARRAY_SIZE(golden_settings_gc_9_1_rv2)); 342 else 343 soc15_program_register_sequence(adev, 344 golden_settings_gc_9_1_rv1, 345 ARRAY_SIZE(golden_settings_gc_9_1_rv1)); 346 break; 347 default: 348 break; 349 } 350 351 soc15_program_register_sequence(adev, golden_settings_gc_9_x_common, 352 (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common)); 353 } 354 355 static void gfx_v9_0_scratch_init(struct amdgpu_device *adev) 356 { 357 adev->gfx.scratch.num_reg = 8; 358 adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0); 359 adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; 360 } 361 362 static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel, 363 bool wc, uint32_t reg, uint32_t val) 364 { 365 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 366 amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) | 367 WRITE_DATA_DST_SEL(0) | 368 (wc ? WR_CONFIRM : 0)); 369 amdgpu_ring_write(ring, reg); 370 amdgpu_ring_write(ring, 0); 371 amdgpu_ring_write(ring, val); 372 } 373 374 static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, 375 int mem_space, int opt, uint32_t addr0, 376 uint32_t addr1, uint32_t ref, uint32_t mask, 377 uint32_t inv) 378 { 379 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 380 amdgpu_ring_write(ring, 381 /* memory (1) or register (0) */ 382 (WAIT_REG_MEM_MEM_SPACE(mem_space) | 383 WAIT_REG_MEM_OPERATION(opt) | /* wait */ 384 WAIT_REG_MEM_FUNCTION(3) | /* equal */ 385 WAIT_REG_MEM_ENGINE(eng_sel))); 386 387 if (mem_space) 388 BUG_ON(addr0 & 0x3); /* Dword align */ 389 amdgpu_ring_write(ring, addr0); 390 amdgpu_ring_write(ring, addr1); 391 amdgpu_ring_write(ring, ref); 392 amdgpu_ring_write(ring, mask); 393 amdgpu_ring_write(ring, inv); /* poll interval */ 394 } 395 396 static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring) 397 { 398 struct amdgpu_device *adev = ring->adev; 399 uint32_t scratch; 400 uint32_t tmp = 0; 401 unsigned i; 402 int r; 403 404 r = amdgpu_gfx_scratch_get(adev, &scratch); 405 if (r) 406 return r; 407 408 WREG32(scratch, 0xCAFEDEAD); 409 r = amdgpu_ring_alloc(ring, 3); 410 if (r) 411 goto error_free_scratch; 412 413 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); 414 amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); 415 amdgpu_ring_write(ring, 0xDEADBEEF); 416 amdgpu_ring_commit(ring); 417 418 for (i = 0; i < adev->usec_timeout; i++) { 419 tmp = RREG32(scratch); 420 if (tmp == 0xDEADBEEF) 421 break; 422 DRM_UDELAY(1); 423 } 424 425 if (i >= adev->usec_timeout) 426 r = -ETIMEDOUT; 427 428 error_free_scratch: 429 amdgpu_gfx_scratch_free(adev, scratch); 430 return r; 431 } 432 433 static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) 434 { 435 struct amdgpu_device *adev = ring->adev; 436 struct amdgpu_ib ib; 437 struct dma_fence *f = NULL; 438 439 unsigned index; 440 uint64_t gpu_addr; 441 uint32_t tmp; 442 long r; 443 444 r = amdgpu_device_wb_get(adev, &index); 445 if (r) 446 return r; 447 448 gpu_addr = adev->wb.gpu_addr + (index * 4); 449 adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); 450 memset(&ib, 0, sizeof(ib)); 451 r = amdgpu_ib_get(adev, NULL, 16, &ib); 452 if (r) 453 goto err1; 454 455 ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); 456 ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; 457 ib.ptr[2] = lower_32_bits(gpu_addr); 458 ib.ptr[3] = upper_32_bits(gpu_addr); 459 ib.ptr[4] = 0xDEADBEEF; 460 ib.length_dw = 5; 461 462 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); 463 if (r) 464 goto err2; 465 466 r = dma_fence_wait_timeout(f, false, timeout); 467 if (r == 0) { 468 r = -ETIMEDOUT; 469 goto err2; 470 } else if (r < 0) { 471 goto err2; 472 } 473 474 tmp = adev->wb.wb[index]; 475 if (tmp == 0xDEADBEEF) 476 r = 0; 477 else 478 r = -EINVAL; 479 480 err2: 481 amdgpu_ib_free(adev, &ib, NULL); 482 dma_fence_put(f); 483 err1: 484 amdgpu_device_wb_free(adev, index); 485 return r; 486 } 487 488 489 static void gfx_v9_0_free_microcode(struct amdgpu_device *adev) 490 { 491 release_firmware(adev->gfx.pfp_fw); 492 adev->gfx.pfp_fw = NULL; 493 release_firmware(adev->gfx.me_fw); 494 adev->gfx.me_fw = NULL; 495 release_firmware(adev->gfx.ce_fw); 496 adev->gfx.ce_fw = NULL; 497 release_firmware(adev->gfx.rlc_fw); 498 adev->gfx.rlc_fw = NULL; 499 release_firmware(adev->gfx.mec_fw); 500 adev->gfx.mec_fw = NULL; 501 release_firmware(adev->gfx.mec2_fw); 502 adev->gfx.mec2_fw = NULL; 503 504 kfree(adev->gfx.rlc.register_list_format); 505 } 506 507 static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev) 508 { 509 const struct rlc_firmware_header_v2_1 *rlc_hdr; 510 511 rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data; 512 adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver); 513 adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver); 514 adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes); 515 adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes); 516 adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver); 517 adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver); 518 adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes); 519 adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes); 520 adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver); 521 adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver); 522 adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes); 523 adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes); 524 adev->gfx.rlc.reg_list_format_direct_reg_list_length = 525 le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length); 526 } 527 528 static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) 529 { 530 adev->gfx.me_fw_write_wait = false; 531 adev->gfx.mec_fw_write_wait = false; 532 533 switch (adev->asic_type) { 534 case CHIP_VEGA10: 535 if ((adev->gfx.me_fw_version >= 0x0000009c) && 536 (adev->gfx.me_feature_version >= 42) && 537 (adev->gfx.pfp_fw_version >= 0x000000b1) && 538 (adev->gfx.pfp_feature_version >= 42)) 539 adev->gfx.me_fw_write_wait = true; 540 541 if ((adev->gfx.mec_fw_version >= 0x00000193) && 542 (adev->gfx.mec_feature_version >= 42)) 543 adev->gfx.mec_fw_write_wait = true; 544 break; 545 case CHIP_VEGA12: 546 if ((adev->gfx.me_fw_version >= 0x0000009c) && 547 (adev->gfx.me_feature_version >= 44) && 548 (adev->gfx.pfp_fw_version >= 0x000000b2) && 549 (adev->gfx.pfp_feature_version >= 44)) 550 adev->gfx.me_fw_write_wait = true; 551 552 if ((adev->gfx.mec_fw_version >= 0x00000196) && 553 (adev->gfx.mec_feature_version >= 44)) 554 adev->gfx.mec_fw_write_wait = true; 555 break; 556 case CHIP_VEGA20: 557 if ((adev->gfx.me_fw_version >= 0x0000009c) && 558 (adev->gfx.me_feature_version >= 44) && 559 (adev->gfx.pfp_fw_version >= 0x000000b2) && 560 (adev->gfx.pfp_feature_version >= 44)) 561 adev->gfx.me_fw_write_wait = true; 562 563 if ((adev->gfx.mec_fw_version >= 0x00000197) && 564 (adev->gfx.mec_feature_version >= 44)) 565 adev->gfx.mec_fw_write_wait = true; 566 break; 567 case CHIP_RAVEN: 568 if ((adev->gfx.me_fw_version >= 0x0000009c) && 569 (adev->gfx.me_feature_version >= 42) && 570 (adev->gfx.pfp_fw_version >= 0x000000b1) && 571 (adev->gfx.pfp_feature_version >= 42)) 572 adev->gfx.me_fw_write_wait = true; 573 574 if ((adev->gfx.mec_fw_version >= 0x00000192) && 575 (adev->gfx.mec_feature_version >= 42)) 576 adev->gfx.mec_fw_write_wait = true; 577 break; 578 default: 579 break; 580 } 581 } 582 583 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) 584 { 585 switch (adev->asic_type) { 586 case CHIP_VEGA10: 587 case CHIP_VEGA12: 588 case CHIP_VEGA20: 589 break; 590 case CHIP_RAVEN: 591 if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) 592 break; 593 if ((adev->gfx.rlc_fw_version != 106 && 594 adev->gfx.rlc_fw_version < 531) || 595 (adev->gfx.rlc_fw_version == 53815) || 596 (adev->gfx.rlc_feature_version < 1) || 597 !adev->gfx.rlc.is_rlc_v2_1) 598 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 599 break; 600 default: 601 break; 602 } 603 } 604 605 static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) 606 { 607 const char *chip_name; 608 char fw_name[30]; 609 int err; 610 struct amdgpu_firmware_info *info = NULL; 611 const struct common_firmware_header *header = NULL; 612 const struct gfx_firmware_header_v1_0 *cp_hdr; 613 const struct rlc_firmware_header_v2_0 *rlc_hdr; 614 unsigned int *tmp = NULL; 615 unsigned int i = 0; 616 uint16_t version_major; 617 uint16_t version_minor; 618 uint32_t smu_version; 619 620 DRM_DEBUG("\n"); 621 622 switch (adev->asic_type) { 623 case CHIP_VEGA10: 624 chip_name = "vega10"; 625 break; 626 case CHIP_VEGA12: 627 chip_name = "vega12"; 628 break; 629 case CHIP_VEGA20: 630 chip_name = "vega20"; 631 break; 632 case CHIP_RAVEN: 633 if (adev->rev_id >= 8) 634 chip_name = "raven2"; 635 else if (adev->pdev->device == 0x15d8) 636 chip_name = "picasso"; 637 else 638 chip_name = "raven"; 639 break; 640 default: 641 BUG(); 642 } 643 644 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name); 645 err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); 646 if (err) 647 goto out; 648 err = amdgpu_ucode_validate(adev->gfx.pfp_fw); 649 if (err) 650 goto out; 651 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; 652 adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); 653 adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); 654 655 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); 656 err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); 657 if (err) 658 goto out; 659 err = amdgpu_ucode_validate(adev->gfx.me_fw); 660 if (err) 661 goto out; 662 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; 663 adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); 664 adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); 665 666 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); 667 err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); 668 if (err) 669 goto out; 670 err = amdgpu_ucode_validate(adev->gfx.ce_fw); 671 if (err) 672 goto out; 673 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; 674 adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); 675 adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); 676 677 /* 678 * For Picasso && AM4 SOCKET board, we use picasso_rlc_am4.bin 679 * instead of picasso_rlc.bin. 680 * Judgment method: 681 * PCO AM4: revision >= 0xC8 && revision <= 0xCF 682 * or revision >= 0xD8 && revision <= 0xDF 683 * otherwise is PCO FP5 684 */ 685 if (!strcmp(chip_name, "picasso") && 686 (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) || 687 ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF)))) 688 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name); 689 else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) && 690 (smu_version >= 0x41e2b)) 691 /** 692 *SMC is loaded by SBIOS on APU and it's able to get the SMU version directly. 693 */ 694 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name); 695 else 696 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); 697 err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); 698 if (err) 699 goto out; 700 err = amdgpu_ucode_validate(adev->gfx.rlc_fw); 701 rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 702 703 version_major = le16_to_cpu(rlc_hdr->header.header_version_major); 704 version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); 705 if (version_major == 2 && version_minor == 1) 706 adev->gfx.rlc.is_rlc_v2_1 = true; 707 708 adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version); 709 adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version); 710 adev->gfx.rlc.save_and_restore_offset = 711 le32_to_cpu(rlc_hdr->save_and_restore_offset); 712 adev->gfx.rlc.clear_state_descriptor_offset = 713 le32_to_cpu(rlc_hdr->clear_state_descriptor_offset); 714 adev->gfx.rlc.avail_scratch_ram_locations = 715 le32_to_cpu(rlc_hdr->avail_scratch_ram_locations); 716 adev->gfx.rlc.reg_restore_list_size = 717 le32_to_cpu(rlc_hdr->reg_restore_list_size); 718 adev->gfx.rlc.reg_list_format_start = 719 le32_to_cpu(rlc_hdr->reg_list_format_start); 720 adev->gfx.rlc.reg_list_format_separate_start = 721 le32_to_cpu(rlc_hdr->reg_list_format_separate_start); 722 adev->gfx.rlc.starting_offsets_start = 723 le32_to_cpu(rlc_hdr->starting_offsets_start); 724 adev->gfx.rlc.reg_list_format_size_bytes = 725 le32_to_cpu(rlc_hdr->reg_list_format_size_bytes); 726 adev->gfx.rlc.reg_list_size_bytes = 727 le32_to_cpu(rlc_hdr->reg_list_size_bytes); 728 adev->gfx.rlc.register_list_format = 729 kmalloc(adev->gfx.rlc.reg_list_format_size_bytes + 730 adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL); 731 if (!adev->gfx.rlc.register_list_format) { 732 err = -ENOMEM; 733 goto out; 734 } 735 736 tmp = (unsigned int *)((uintptr_t)rlc_hdr + 737 le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes)); 738 for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++) 739 adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]); 740 741 adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i; 742 743 tmp = (unsigned int *)((uintptr_t)rlc_hdr + 744 le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes)); 745 for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++) 746 adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]); 747 748 if (adev->gfx.rlc.is_rlc_v2_1) 749 gfx_v9_0_init_rlc_ext_microcode(adev); 750 751 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); 752 err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); 753 if (err) 754 goto out; 755 err = amdgpu_ucode_validate(adev->gfx.mec_fw); 756 if (err) 757 goto out; 758 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 759 adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); 760 adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); 761 762 763 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); 764 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); 765 if (!err) { 766 err = amdgpu_ucode_validate(adev->gfx.mec2_fw); 767 if (err) 768 goto out; 769 cp_hdr = (const struct gfx_firmware_header_v1_0 *) 770 adev->gfx.mec2_fw->data; 771 adev->gfx.mec2_fw_version = 772 le32_to_cpu(cp_hdr->header.ucode_version); 773 adev->gfx.mec2_feature_version = 774 le32_to_cpu(cp_hdr->ucode_feature_version); 775 } else { 776 err = 0; 777 adev->gfx.mec2_fw = NULL; 778 } 779 780 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 781 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP]; 782 info->ucode_id = AMDGPU_UCODE_ID_CP_PFP; 783 info->fw = adev->gfx.pfp_fw; 784 header = (const struct common_firmware_header *)info->fw->data; 785 adev->firmware.fw_size += 786 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); 787 788 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME]; 789 info->ucode_id = AMDGPU_UCODE_ID_CP_ME; 790 info->fw = adev->gfx.me_fw; 791 header = (const struct common_firmware_header *)info->fw->data; 792 adev->firmware.fw_size += 793 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); 794 795 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE]; 796 info->ucode_id = AMDGPU_UCODE_ID_CP_CE; 797 info->fw = adev->gfx.ce_fw; 798 header = (const struct common_firmware_header *)info->fw->data; 799 adev->firmware.fw_size += 800 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); 801 802 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G]; 803 info->ucode_id = AMDGPU_UCODE_ID_RLC_G; 804 info->fw = adev->gfx.rlc_fw; 805 header = (const struct common_firmware_header *)info->fw->data; 806 adev->firmware.fw_size += 807 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); 808 809 if (adev->gfx.rlc.is_rlc_v2_1 && 810 adev->gfx.rlc.save_restore_list_cntl_size_bytes && 811 adev->gfx.rlc.save_restore_list_gpm_size_bytes && 812 adev->gfx.rlc.save_restore_list_srm_size_bytes) { 813 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL]; 814 info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL; 815 info->fw = adev->gfx.rlc_fw; 816 adev->firmware.fw_size += 817 ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE); 818 819 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM]; 820 info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM; 821 info->fw = adev->gfx.rlc_fw; 822 adev->firmware.fw_size += 823 ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE); 824 825 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM]; 826 info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM; 827 info->fw = adev->gfx.rlc_fw; 828 adev->firmware.fw_size += 829 ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE); 830 } 831 832 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1]; 833 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1; 834 info->fw = adev->gfx.mec_fw; 835 header = (const struct common_firmware_header *)info->fw->data; 836 cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data; 837 adev->firmware.fw_size += 838 ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE); 839 840 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT]; 841 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT; 842 info->fw = adev->gfx.mec_fw; 843 adev->firmware.fw_size += 844 ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE); 845 846 if (adev->gfx.mec2_fw) { 847 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2]; 848 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2; 849 info->fw = adev->gfx.mec2_fw; 850 header = (const struct common_firmware_header *)info->fw->data; 851 cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data; 852 adev->firmware.fw_size += 853 ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE); 854 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT]; 855 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT; 856 info->fw = adev->gfx.mec2_fw; 857 adev->firmware.fw_size += 858 ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE); 859 } 860 861 } 862 863 out: 864 gfx_v9_0_check_if_need_gfxoff(adev); 865 gfx_v9_0_check_fw_write_wait(adev); 866 if (err) { 867 dev_err(adev->dev, 868 "gfx9: Failed to load firmware \"%s\"\n", 869 fw_name); 870 release_firmware(adev->gfx.pfp_fw); 871 adev->gfx.pfp_fw = NULL; 872 release_firmware(adev->gfx.me_fw); 873 adev->gfx.me_fw = NULL; 874 release_firmware(adev->gfx.ce_fw); 875 adev->gfx.ce_fw = NULL; 876 release_firmware(adev->gfx.rlc_fw); 877 adev->gfx.rlc_fw = NULL; 878 release_firmware(adev->gfx.mec_fw); 879 adev->gfx.mec_fw = NULL; 880 release_firmware(adev->gfx.mec2_fw); 881 adev->gfx.mec2_fw = NULL; 882 } 883 return err; 884 } 885 886 static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev) 887 { 888 u32 count = 0; 889 const struct cs_section_def *sect = NULL; 890 const struct cs_extent_def *ext = NULL; 891 892 /* begin clear state */ 893 count += 2; 894 /* context control state */ 895 count += 3; 896 897 for (sect = gfx9_cs_data; sect->section != NULL; ++sect) { 898 for (ext = sect->section; ext->extent != NULL; ++ext) { 899 if (sect->id == SECT_CONTEXT) 900 count += 2 + ext->reg_count; 901 else 902 return 0; 903 } 904 } 905 906 /* end clear state */ 907 count += 2; 908 /* clear state */ 909 count += 2; 910 911 return count; 912 } 913 914 static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, 915 volatile u32 *buffer) 916 { 917 u32 count = 0, i; 918 const struct cs_section_def *sect = NULL; 919 const struct cs_extent_def *ext = NULL; 920 921 if (adev->gfx.rlc.cs_data == NULL) 922 return; 923 if (buffer == NULL) 924 return; 925 926 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 927 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); 928 929 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 930 buffer[count++] = cpu_to_le32(0x80000000); 931 buffer[count++] = cpu_to_le32(0x80000000); 932 933 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { 934 for (ext = sect->section; ext->extent != NULL; ++ext) { 935 if (sect->id == SECT_CONTEXT) { 936 buffer[count++] = 937 cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); 938 buffer[count++] = cpu_to_le32(ext->reg_index - 939 PACKET3_SET_CONTEXT_REG_START); 940 for (i = 0; i < ext->reg_count; i++) 941 buffer[count++] = cpu_to_le32(ext->extent[i]); 942 } else { 943 return; 944 } 945 } 946 } 947 948 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 949 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); 950 951 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0)); 952 buffer[count++] = cpu_to_le32(0); 953 } 954 955 static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev) 956 { 957 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; 958 uint32_t pg_always_on_cu_num = 2; 959 uint32_t always_on_cu_num; 960 uint32_t i, j, k; 961 uint32_t mask, cu_bitmap, counter; 962 963 if (adev->flags & AMD_IS_APU) 964 always_on_cu_num = 4; 965 else if (adev->asic_type == CHIP_VEGA12) 966 always_on_cu_num = 8; 967 else 968 always_on_cu_num = 12; 969 970 mutex_lock(&adev->grbm_idx_mutex); 971 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 972 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 973 mask = 1; 974 cu_bitmap = 0; 975 counter = 0; 976 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff); 977 978 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { 979 if (cu_info->bitmap[i][j] & mask) { 980 if (counter == pg_always_on_cu_num) 981 WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap); 982 if (counter < always_on_cu_num) 983 cu_bitmap |= mask; 984 else 985 break; 986 counter++; 987 } 988 mask <<= 1; 989 } 990 991 WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap); 992 cu_info->ao_cu_bitmap[i][j] = cu_bitmap; 993 } 994 } 995 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 996 mutex_unlock(&adev->grbm_idx_mutex); 997 } 998 999 static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev) 1000 { 1001 uint32_t data; 1002 1003 /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */ 1004 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F); 1005 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7); 1006 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077); 1007 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16)); 1008 1009 /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */ 1010 WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000); 1011 1012 /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */ 1013 WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500); 1014 1015 mutex_lock(&adev->grbm_idx_mutex); 1016 /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/ 1017 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 1018 WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff); 1019 1020 /* set mmRLC_LB_PARAMS = 0x003F_1006 */ 1021 data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003); 1022 data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010); 1023 data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F); 1024 WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data); 1025 1026 /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */ 1027 data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7); 1028 data &= 0x0000FFFF; 1029 data |= 0x00C00000; 1030 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data); 1031 1032 /* 1033 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven), 1034 * programmed in gfx_v9_0_init_always_on_cu_mask() 1035 */ 1036 1037 /* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved, 1038 * but used for RLC_LB_CNTL configuration */ 1039 data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK; 1040 data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09); 1041 data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000); 1042 WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data); 1043 mutex_unlock(&adev->grbm_idx_mutex); 1044 1045 gfx_v9_0_init_always_on_cu_mask(adev); 1046 } 1047 1048 static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev) 1049 { 1050 uint32_t data; 1051 1052 /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */ 1053 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F); 1054 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8); 1055 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077); 1056 WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16)); 1057 1058 /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */ 1059 WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000); 1060 1061 /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */ 1062 WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800); 1063 1064 mutex_lock(&adev->grbm_idx_mutex); 1065 /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/ 1066 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 1067 WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff); 1068 1069 /* set mmRLC_LB_PARAMS = 0x003F_1006 */ 1070 data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003); 1071 data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010); 1072 data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F); 1073 WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data); 1074 1075 /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */ 1076 data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7); 1077 data &= 0x0000FFFF; 1078 data |= 0x00C00000; 1079 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data); 1080 1081 /* 1082 * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON), 1083 * programmed in gfx_v9_0_init_always_on_cu_mask() 1084 */ 1085 1086 /* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved, 1087 * but used for RLC_LB_CNTL configuration */ 1088 data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK; 1089 data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09); 1090 data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000); 1091 WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data); 1092 mutex_unlock(&adev->grbm_idx_mutex); 1093 1094 gfx_v9_0_init_always_on_cu_mask(adev); 1095 } 1096 1097 static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable) 1098 { 1099 WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0); 1100 } 1101 1102 static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev) 1103 { 1104 return 5; 1105 } 1106 1107 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) 1108 { 1109 const struct cs_section_def *cs_data; 1110 int r; 1111 1112 adev->gfx.rlc.cs_data = gfx9_cs_data; 1113 1114 cs_data = adev->gfx.rlc.cs_data; 1115 1116 if (cs_data) { 1117 /* init clear state block */ 1118 r = amdgpu_gfx_rlc_init_csb(adev); 1119 if (r) 1120 return r; 1121 } 1122 1123 if (adev->asic_type == CHIP_RAVEN) { 1124 /* TODO: double check the cp_table_size for RV */ 1125 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ 1126 r = amdgpu_gfx_rlc_init_cpt(adev); 1127 if (r) 1128 return r; 1129 } 1130 1131 switch (adev->asic_type) { 1132 case CHIP_RAVEN: 1133 gfx_v9_0_init_lbpw(adev); 1134 break; 1135 case CHIP_VEGA20: 1136 gfx_v9_4_init_lbpw(adev); 1137 break; 1138 default: 1139 break; 1140 } 1141 1142 return 0; 1143 } 1144 1145 static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev) 1146 { 1147 int r; 1148 1149 r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); 1150 if (unlikely(r != 0)) 1151 return r; 1152 1153 r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, 1154 AMDGPU_GEM_DOMAIN_VRAM); 1155 if (!r) 1156 adev->gfx.rlc.clear_state_gpu_addr = 1157 amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj); 1158 1159 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); 1160 1161 return r; 1162 } 1163 1164 static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev) 1165 { 1166 int r; 1167 1168 if (!adev->gfx.rlc.clear_state_obj) 1169 return; 1170 1171 r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); 1172 if (likely(r == 0)) { 1173 amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); 1174 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); 1175 } 1176 } 1177 1178 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev) 1179 { 1180 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); 1181 amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL); 1182 } 1183 1184 static int gfx_v9_0_mec_init(struct amdgpu_device *adev) 1185 { 1186 int r; 1187 u32 *hpd; 1188 const __le32 *fw_data; 1189 unsigned fw_size; 1190 u32 *fw; 1191 size_t mec_hpd_size; 1192 1193 const struct gfx_firmware_header_v1_0 *mec_hdr; 1194 1195 bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); 1196 1197 /* take ownership of the relevant compute queues */ 1198 amdgpu_gfx_compute_queue_acquire(adev); 1199 mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE; 1200 1201 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, 1202 AMDGPU_GEM_DOMAIN_VRAM, 1203 &adev->gfx.mec.hpd_eop_obj, 1204 &adev->gfx.mec.hpd_eop_gpu_addr, 1205 (void **)&hpd); 1206 if (r) { 1207 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); 1208 gfx_v9_0_mec_fini(adev); 1209 return r; 1210 } 1211 1212 memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size); 1213 1214 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); 1215 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); 1216 1217 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 1218 1219 fw_data = (const __le32 *) 1220 (adev->gfx.mec_fw->data + 1221 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); 1222 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4; 1223 1224 r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes, 1225 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 1226 &adev->gfx.mec.mec_fw_obj, 1227 &adev->gfx.mec.mec_fw_gpu_addr, 1228 (void **)&fw); 1229 if (r) { 1230 dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r); 1231 gfx_v9_0_mec_fini(adev); 1232 return r; 1233 } 1234 1235 memcpy(fw, fw_data, fw_size); 1236 1237 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); 1238 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); 1239 1240 return 0; 1241 } 1242 1243 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address) 1244 { 1245 WREG32_SOC15(GC, 0, mmSQ_IND_INDEX, 1246 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | 1247 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | 1248 (address << SQ_IND_INDEX__INDEX__SHIFT) | 1249 (SQ_IND_INDEX__FORCE_READ_MASK)); 1250 return RREG32_SOC15(GC, 0, mmSQ_IND_DATA); 1251 } 1252 1253 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, 1254 uint32_t wave, uint32_t thread, 1255 uint32_t regno, uint32_t num, uint32_t *out) 1256 { 1257 WREG32_SOC15(GC, 0, mmSQ_IND_INDEX, 1258 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | 1259 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | 1260 (regno << SQ_IND_INDEX__INDEX__SHIFT) | 1261 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) | 1262 (SQ_IND_INDEX__FORCE_READ_MASK) | 1263 (SQ_IND_INDEX__AUTO_INCR_MASK)); 1264 while (num--) 1265 *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA); 1266 } 1267 1268 static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) 1269 { 1270 /* type 1 wave data */ 1271 dst[(*no_fields)++] = 1; 1272 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS); 1273 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO); 1274 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI); 1275 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO); 1276 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI); 1277 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID); 1278 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0); 1279 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1); 1280 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC); 1281 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC); 1282 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS); 1283 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS); 1284 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0); 1285 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0); 1286 } 1287 1288 static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, 1289 uint32_t wave, uint32_t start, 1290 uint32_t size, uint32_t *dst) 1291 { 1292 wave_read_regs( 1293 adev, simd, wave, 0, 1294 start + SQIND_WAVE_SGPRS_OFFSET, size, dst); 1295 } 1296 1297 static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, 1298 uint32_t wave, uint32_t thread, 1299 uint32_t start, uint32_t size, 1300 uint32_t *dst) 1301 { 1302 wave_read_regs( 1303 adev, simd, wave, thread, 1304 start + SQIND_WAVE_VGPRS_OFFSET, size, dst); 1305 } 1306 1307 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev, 1308 u32 me, u32 pipe, u32 q) 1309 { 1310 soc15_grbm_select(adev, me, pipe, q, 0); 1311 } 1312 1313 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = { 1314 .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter, 1315 .select_se_sh = &gfx_v9_0_select_se_sh, 1316 .read_wave_data = &gfx_v9_0_read_wave_data, 1317 .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs, 1318 .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs, 1319 .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q 1320 }; 1321 1322 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) 1323 { 1324 u32 gb_addr_config; 1325 int err; 1326 1327 adev->gfx.funcs = &gfx_v9_0_gfx_funcs; 1328 1329 switch (adev->asic_type) { 1330 case CHIP_VEGA10: 1331 adev->gfx.config.max_hw_contexts = 8; 1332 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1333 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1334 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 1335 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; 1336 gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN; 1337 break; 1338 case CHIP_VEGA12: 1339 adev->gfx.config.max_hw_contexts = 8; 1340 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1341 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1342 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 1343 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; 1344 gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN; 1345 DRM_INFO("fix gfx.config for vega12\n"); 1346 break; 1347 case CHIP_VEGA20: 1348 adev->gfx.config.max_hw_contexts = 8; 1349 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1350 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1351 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 1352 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; 1353 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG); 1354 gb_addr_config &= ~0xf3e777ff; 1355 gb_addr_config |= 0x22014042; 1356 /* check vbios table if gpu info is not available */ 1357 err = amdgpu_atomfirmware_get_gfx_info(adev); 1358 if (err) 1359 return err; 1360 break; 1361 case CHIP_RAVEN: 1362 adev->gfx.config.max_hw_contexts = 8; 1363 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1364 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1365 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 1366 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; 1367 if (adev->rev_id >= 8) 1368 gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN; 1369 else 1370 gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN; 1371 break; 1372 default: 1373 BUG(); 1374 break; 1375 } 1376 1377 adev->gfx.config.gb_addr_config = gb_addr_config; 1378 1379 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << 1380 REG_GET_FIELD( 1381 adev->gfx.config.gb_addr_config, 1382 GB_ADDR_CONFIG, 1383 NUM_PIPES); 1384 1385 adev->gfx.config.max_tile_pipes = 1386 adev->gfx.config.gb_addr_config_fields.num_pipes; 1387 1388 adev->gfx.config.gb_addr_config_fields.num_banks = 1 << 1389 REG_GET_FIELD( 1390 adev->gfx.config.gb_addr_config, 1391 GB_ADDR_CONFIG, 1392 NUM_BANKS); 1393 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 << 1394 REG_GET_FIELD( 1395 adev->gfx.config.gb_addr_config, 1396 GB_ADDR_CONFIG, 1397 MAX_COMPRESSED_FRAGS); 1398 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 << 1399 REG_GET_FIELD( 1400 adev->gfx.config.gb_addr_config, 1401 GB_ADDR_CONFIG, 1402 NUM_RB_PER_SE); 1403 adev->gfx.config.gb_addr_config_fields.num_se = 1 << 1404 REG_GET_FIELD( 1405 adev->gfx.config.gb_addr_config, 1406 GB_ADDR_CONFIG, 1407 NUM_SHADER_ENGINES); 1408 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 + 1409 REG_GET_FIELD( 1410 adev->gfx.config.gb_addr_config, 1411 GB_ADDR_CONFIG, 1412 PIPE_INTERLEAVE_SIZE)); 1413 1414 return 0; 1415 } 1416 1417 static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev, 1418 struct amdgpu_ngg_buf *ngg_buf, 1419 int size_se, 1420 int default_size_se) 1421 { 1422 int r; 1423 1424 if (size_se < 0) { 1425 dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se); 1426 return -EINVAL; 1427 } 1428 size_se = size_se ? size_se : default_size_se; 1429 1430 ngg_buf->size = size_se * adev->gfx.config.max_shader_engines; 1431 r = amdgpu_bo_create_kernel(adev, ngg_buf->size, 1432 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 1433 &ngg_buf->bo, 1434 &ngg_buf->gpu_addr, 1435 NULL); 1436 if (r) { 1437 dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r); 1438 return r; 1439 } 1440 ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo); 1441 1442 return r; 1443 } 1444 1445 static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev) 1446 { 1447 int i; 1448 1449 for (i = 0; i < NGG_BUF_MAX; i++) 1450 amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo, 1451 &adev->gfx.ngg.buf[i].gpu_addr, 1452 NULL); 1453 1454 memset(&adev->gfx.ngg.buf[0], 0, 1455 sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX); 1456 1457 adev->gfx.ngg.init = false; 1458 1459 return 0; 1460 } 1461 1462 static int gfx_v9_0_ngg_init(struct amdgpu_device *adev) 1463 { 1464 int r; 1465 1466 if (!amdgpu_ngg || adev->gfx.ngg.init == true) 1467 return 0; 1468 1469 /* GDS reserve memory: 64 bytes alignment */ 1470 adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40); 1471 adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size; 1472 adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size; 1473 adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE); 1474 adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE); 1475 1476 /* Primitive Buffer */ 1477 r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM], 1478 amdgpu_prim_buf_per_se, 1479 64 * 1024); 1480 if (r) { 1481 dev_err(adev->dev, "Failed to create Primitive Buffer\n"); 1482 goto err; 1483 } 1484 1485 /* Position Buffer */ 1486 r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS], 1487 amdgpu_pos_buf_per_se, 1488 256 * 1024); 1489 if (r) { 1490 dev_err(adev->dev, "Failed to create Position Buffer\n"); 1491 goto err; 1492 } 1493 1494 /* Control Sideband */ 1495 r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL], 1496 amdgpu_cntl_sb_buf_per_se, 1497 256); 1498 if (r) { 1499 dev_err(adev->dev, "Failed to create Control Sideband Buffer\n"); 1500 goto err; 1501 } 1502 1503 /* Parameter Cache, not created by default */ 1504 if (amdgpu_param_buf_per_se <= 0) 1505 goto out; 1506 1507 r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM], 1508 amdgpu_param_buf_per_se, 1509 512 * 1024); 1510 if (r) { 1511 dev_err(adev->dev, "Failed to create Parameter Cache\n"); 1512 goto err; 1513 } 1514 1515 out: 1516 adev->gfx.ngg.init = true; 1517 return 0; 1518 err: 1519 gfx_v9_0_ngg_fini(adev); 1520 return r; 1521 } 1522 1523 static int gfx_v9_0_ngg_en(struct amdgpu_device *adev) 1524 { 1525 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0]; 1526 int r; 1527 u32 data, base; 1528 1529 if (!amdgpu_ngg) 1530 return 0; 1531 1532 /* Program buffer size */ 1533 data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, 1534 adev->gfx.ngg.buf[NGG_PRIM].size >> 8); 1535 data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, 1536 adev->gfx.ngg.buf[NGG_POS].size >> 8); 1537 WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data); 1538 1539 data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, 1540 adev->gfx.ngg.buf[NGG_CNTL].size >> 8); 1541 data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, 1542 adev->gfx.ngg.buf[NGG_PARAM].size >> 10); 1543 WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data); 1544 1545 /* Program buffer base address */ 1546 base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr); 1547 data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base); 1548 WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data); 1549 1550 base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr); 1551 data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base); 1552 WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data); 1553 1554 base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr); 1555 data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base); 1556 WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data); 1557 1558 base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr); 1559 data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base); 1560 WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data); 1561 1562 base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr); 1563 data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base); 1564 WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data); 1565 1566 base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr); 1567 data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base); 1568 WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data); 1569 1570 /* Clear GDS reserved memory */ 1571 r = amdgpu_ring_alloc(ring, 17); 1572 if (r) { 1573 DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n", 1574 ring->name, r); 1575 return r; 1576 } 1577 1578 gfx_v9_0_write_data_to_reg(ring, 0, false, 1579 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 1580 (adev->gds.mem.total_size + 1581 adev->gfx.ngg.gds_reserve_size)); 1582 1583 amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5)); 1584 amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC | 1585 PACKET3_DMA_DATA_DST_SEL(1) | 1586 PACKET3_DMA_DATA_SRC_SEL(2))); 1587 amdgpu_ring_write(ring, 0); 1588 amdgpu_ring_write(ring, 0); 1589 amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr); 1590 amdgpu_ring_write(ring, 0); 1591 amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT | 1592 adev->gfx.ngg.gds_reserve_size); 1593 1594 gfx_v9_0_write_data_to_reg(ring, 0, false, 1595 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0); 1596 1597 amdgpu_ring_commit(ring); 1598 1599 return 0; 1600 } 1601 1602 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, 1603 int mec, int pipe, int queue) 1604 { 1605 int r; 1606 unsigned irq_type; 1607 struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; 1608 1609 ring = &adev->gfx.compute_ring[ring_id]; 1610 1611 /* mec0 is me1 */ 1612 ring->me = mec + 1; 1613 ring->pipe = pipe; 1614 ring->queue = queue; 1615 1616 ring->ring_obj = NULL; 1617 ring->use_doorbell = true; 1618 ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; 1619 ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr 1620 + (ring_id * GFX9_MEC_HPD_SIZE); 1621 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); 1622 1623 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP 1624 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) 1625 + ring->pipe; 1626 1627 /* type-2 packets are deprecated on MEC, use type-3 instead */ 1628 r = amdgpu_ring_init(adev, ring, 1024, 1629 &adev->gfx.eop_irq, irq_type); 1630 if (r) 1631 return r; 1632 1633 1634 return 0; 1635 } 1636 1637 static int gfx_v9_0_sw_init(void *handle) 1638 { 1639 int i, j, k, r, ring_id; 1640 struct amdgpu_ring *ring; 1641 struct amdgpu_kiq *kiq; 1642 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1643 1644 switch (adev->asic_type) { 1645 case CHIP_VEGA10: 1646 case CHIP_VEGA12: 1647 case CHIP_VEGA20: 1648 case CHIP_RAVEN: 1649 adev->gfx.mec.num_mec = 2; 1650 break; 1651 default: 1652 adev->gfx.mec.num_mec = 1; 1653 break; 1654 } 1655 1656 adev->gfx.mec.num_pipe_per_mec = 4; 1657 adev->gfx.mec.num_queue_per_pipe = 8; 1658 1659 /* EOP Event */ 1660 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq); 1661 if (r) 1662 return r; 1663 1664 /* Privileged reg */ 1665 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT, 1666 &adev->gfx.priv_reg_irq); 1667 if (r) 1668 return r; 1669 1670 /* Privileged inst */ 1671 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT, 1672 &adev->gfx.priv_inst_irq); 1673 if (r) 1674 return r; 1675 1676 /* ECC error */ 1677 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR, 1678 &adev->gfx.cp_ecc_error_irq); 1679 if (r) 1680 return r; 1681 1682 /* FUE error */ 1683 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR, 1684 &adev->gfx.cp_ecc_error_irq); 1685 if (r) 1686 return r; 1687 1688 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; 1689 1690 gfx_v9_0_scratch_init(adev); 1691 1692 r = gfx_v9_0_init_microcode(adev); 1693 if (r) { 1694 DRM_ERROR("Failed to load gfx firmware!\n"); 1695 return r; 1696 } 1697 1698 r = adev->gfx.rlc.funcs->init(adev); 1699 if (r) { 1700 DRM_ERROR("Failed to init rlc BOs!\n"); 1701 return r; 1702 } 1703 1704 r = gfx_v9_0_mec_init(adev); 1705 if (r) { 1706 DRM_ERROR("Failed to init MEC BOs!\n"); 1707 return r; 1708 } 1709 1710 /* set up the gfx ring */ 1711 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 1712 ring = &adev->gfx.gfx_ring[i]; 1713 ring->ring_obj = NULL; 1714 if (!i) 1715 sprintf(ring->name, "gfx"); 1716 else 1717 sprintf(ring->name, "gfx_%d", i); 1718 ring->use_doorbell = true; 1719 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; 1720 r = amdgpu_ring_init(adev, ring, 1024, 1721 &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP); 1722 if (r) 1723 return r; 1724 } 1725 1726 /* set up the compute queues - allocate horizontally across pipes */ 1727 ring_id = 0; 1728 for (i = 0; i < adev->gfx.mec.num_mec; ++i) { 1729 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { 1730 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 1731 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j)) 1732 continue; 1733 1734 r = gfx_v9_0_compute_ring_init(adev, 1735 ring_id, 1736 i, k, j); 1737 if (r) 1738 return r; 1739 1740 ring_id++; 1741 } 1742 } 1743 } 1744 1745 r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE); 1746 if (r) { 1747 DRM_ERROR("Failed to init KIQ BOs!\n"); 1748 return r; 1749 } 1750 1751 kiq = &adev->gfx.kiq; 1752 r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); 1753 if (r) 1754 return r; 1755 1756 /* create MQD for all compute queues as wel as KIQ for SRIOV case */ 1757 r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation)); 1758 if (r) 1759 return r; 1760 1761 adev->gfx.ce_ram_size = 0x8000; 1762 1763 r = gfx_v9_0_gpu_early_init(adev); 1764 if (r) 1765 return r; 1766 1767 r = gfx_v9_0_ngg_init(adev); 1768 if (r) 1769 return r; 1770 1771 return 0; 1772 } 1773 1774 1775 static int gfx_v9_0_sw_fini(void *handle) 1776 { 1777 int i; 1778 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1779 1780 if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) && 1781 adev->gfx.ras_if) { 1782 struct ras_common_if *ras_if = adev->gfx.ras_if; 1783 struct ras_ih_if ih_info = { 1784 .head = *ras_if, 1785 }; 1786 1787 amdgpu_ras_debugfs_remove(adev, ras_if); 1788 amdgpu_ras_sysfs_remove(adev, ras_if); 1789 amdgpu_ras_interrupt_remove_handler(adev, &ih_info); 1790 amdgpu_ras_feature_enable(adev, ras_if, 0); 1791 kfree(ras_if); 1792 } 1793 1794 amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL); 1795 amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL); 1796 amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL); 1797 1798 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 1799 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); 1800 for (i = 0; i < adev->gfx.num_compute_rings; i++) 1801 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); 1802 1803 amdgpu_gfx_compute_mqd_sw_fini(adev); 1804 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); 1805 amdgpu_gfx_kiq_fini(adev); 1806 1807 gfx_v9_0_mec_fini(adev); 1808 gfx_v9_0_ngg_fini(adev); 1809 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, 1810 &adev->gfx.rlc.clear_state_gpu_addr, 1811 (void **)&adev->gfx.rlc.cs_ptr); 1812 if (adev->asic_type == CHIP_RAVEN) { 1813 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, 1814 &adev->gfx.rlc.cp_table_gpu_addr, 1815 (void **)&adev->gfx.rlc.cp_table_ptr); 1816 } 1817 gfx_v9_0_free_microcode(adev); 1818 1819 return 0; 1820 } 1821 1822 1823 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev) 1824 { 1825 /* TODO */ 1826 } 1827 1828 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance) 1829 { 1830 u32 data; 1831 1832 if (instance == 0xffffffff) 1833 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); 1834 else 1835 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); 1836 1837 if (se_num == 0xffffffff) 1838 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); 1839 else 1840 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); 1841 1842 if (sh_num == 0xffffffff) 1843 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); 1844 else 1845 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); 1846 1847 WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); 1848 } 1849 1850 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev) 1851 { 1852 u32 data, mask; 1853 1854 data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE); 1855 data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE); 1856 1857 data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; 1858 data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; 1859 1860 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se / 1861 adev->gfx.config.max_sh_per_se); 1862 1863 return (~data) & mask; 1864 } 1865 1866 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev) 1867 { 1868 int i, j; 1869 u32 data; 1870 u32 active_rbs = 0; 1871 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / 1872 adev->gfx.config.max_sh_per_se; 1873 1874 mutex_lock(&adev->grbm_idx_mutex); 1875 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 1876 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 1877 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff); 1878 data = gfx_v9_0_get_rb_active_bitmap(adev); 1879 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * 1880 rb_bitmap_width_per_sh); 1881 } 1882 } 1883 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 1884 mutex_unlock(&adev->grbm_idx_mutex); 1885 1886 adev->gfx.config.backend_enable_mask = active_rbs; 1887 adev->gfx.config.num_rbs = hweight32(active_rbs); 1888 } 1889 1890 #define DEFAULT_SH_MEM_BASES (0x6000) 1891 #define FIRST_COMPUTE_VMID (8) 1892 #define LAST_COMPUTE_VMID (16) 1893 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev) 1894 { 1895 int i; 1896 uint32_t sh_mem_config; 1897 uint32_t sh_mem_bases; 1898 1899 /* 1900 * Configure apertures: 1901 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB) 1902 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB) 1903 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB) 1904 */ 1905 sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16); 1906 1907 sh_mem_config = SH_MEM_ADDRESS_MODE_64 | 1908 SH_MEM_ALIGNMENT_MODE_UNALIGNED << 1909 SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; 1910 1911 mutex_lock(&adev->srbm_mutex); 1912 for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { 1913 soc15_grbm_select(adev, 0, 0, 0, i); 1914 /* CP and shaders */ 1915 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config); 1916 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases); 1917 } 1918 soc15_grbm_select(adev, 0, 0, 0, 0); 1919 mutex_unlock(&adev->srbm_mutex); 1920 } 1921 1922 static void gfx_v9_0_constants_init(struct amdgpu_device *adev) 1923 { 1924 u32 tmp; 1925 int i; 1926 1927 WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff); 1928 1929 gfx_v9_0_tiling_mode_table_init(adev); 1930 1931 gfx_v9_0_setup_rb(adev); 1932 gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info); 1933 adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2); 1934 1935 /* XXX SH_MEM regs */ 1936 /* where to put LDS, scratch, GPUVM in FSA64 space */ 1937 mutex_lock(&adev->srbm_mutex); 1938 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) { 1939 soc15_grbm_select(adev, 0, 0, 0, i); 1940 /* CP and shaders */ 1941 if (i == 0) { 1942 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, 1943 SH_MEM_ALIGNMENT_MODE_UNALIGNED); 1944 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp); 1945 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0); 1946 } else { 1947 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, 1948 SH_MEM_ALIGNMENT_MODE_UNALIGNED); 1949 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp); 1950 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE, 1951 (adev->gmc.private_aperture_start >> 48)); 1952 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE, 1953 (adev->gmc.shared_aperture_start >> 48)); 1954 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp); 1955 } 1956 } 1957 soc15_grbm_select(adev, 0, 0, 0, 0); 1958 1959 mutex_unlock(&adev->srbm_mutex); 1960 1961 gfx_v9_0_init_compute_vmid(adev); 1962 } 1963 1964 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) 1965 { 1966 u32 i, j, k; 1967 u32 mask; 1968 1969 mutex_lock(&adev->grbm_idx_mutex); 1970 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 1971 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 1972 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff); 1973 for (k = 0; k < adev->usec_timeout; k++) { 1974 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0) 1975 break; 1976 udelay(1); 1977 } 1978 if (k == adev->usec_timeout) { 1979 gfx_v9_0_select_se_sh(adev, 0xffffffff, 1980 0xffffffff, 0xffffffff); 1981 mutex_unlock(&adev->grbm_idx_mutex); 1982 DRM_INFO("Timeout wait for RLC serdes %u,%u\n", 1983 i, j); 1984 return; 1985 } 1986 } 1987 } 1988 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 1989 mutex_unlock(&adev->grbm_idx_mutex); 1990 1991 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | 1992 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK | 1993 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK | 1994 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK; 1995 for (k = 0; k < adev->usec_timeout; k++) { 1996 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0) 1997 break; 1998 udelay(1); 1999 } 2000 } 2001 2002 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, 2003 bool enable) 2004 { 2005 u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0); 2006 2007 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0); 2008 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0); 2009 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0); 2010 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0); 2011 2012 WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp); 2013 } 2014 2015 static void gfx_v9_0_init_csb(struct amdgpu_device *adev) 2016 { 2017 /* csib */ 2018 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI), 2019 adev->gfx.rlc.clear_state_gpu_addr >> 32); 2020 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO), 2021 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); 2022 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH), 2023 adev->gfx.rlc.clear_state_size); 2024 } 2025 2026 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format, 2027 int indirect_offset, 2028 int list_size, 2029 int *unique_indirect_regs, 2030 int unique_indirect_reg_count, 2031 int *indirect_start_offsets, 2032 int *indirect_start_offsets_count, 2033 int max_start_offsets_count) 2034 { 2035 int idx; 2036 2037 for (; indirect_offset < list_size; indirect_offset++) { 2038 WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count); 2039 indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset; 2040 *indirect_start_offsets_count = *indirect_start_offsets_count + 1; 2041 2042 while (register_list_format[indirect_offset] != 0xFFFFFFFF) { 2043 indirect_offset += 2; 2044 2045 /* look for the matching indice */ 2046 for (idx = 0; idx < unique_indirect_reg_count; idx++) { 2047 if (unique_indirect_regs[idx] == 2048 register_list_format[indirect_offset] || 2049 !unique_indirect_regs[idx]) 2050 break; 2051 } 2052 2053 BUG_ON(idx >= unique_indirect_reg_count); 2054 2055 if (!unique_indirect_regs[idx]) 2056 unique_indirect_regs[idx] = register_list_format[indirect_offset]; 2057 2058 indirect_offset++; 2059 } 2060 } 2061 } 2062 2063 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev) 2064 { 2065 int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; 2066 int unique_indirect_reg_count = 0; 2067 2068 int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; 2069 int indirect_start_offsets_count = 0; 2070 2071 int list_size = 0; 2072 int i = 0, j = 0; 2073 u32 tmp = 0; 2074 2075 u32 *register_list_format = 2076 kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL); 2077 if (!register_list_format) 2078 return -ENOMEM; 2079 memcpy(register_list_format, adev->gfx.rlc.register_list_format, 2080 adev->gfx.rlc.reg_list_format_size_bytes); 2081 2082 /* setup unique_indirect_regs array and indirect_start_offsets array */ 2083 unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs); 2084 gfx_v9_1_parse_ind_reg_list(register_list_format, 2085 adev->gfx.rlc.reg_list_format_direct_reg_list_length, 2086 adev->gfx.rlc.reg_list_format_size_bytes >> 2, 2087 unique_indirect_regs, 2088 unique_indirect_reg_count, 2089 indirect_start_offsets, 2090 &indirect_start_offsets_count, 2091 ARRAY_SIZE(indirect_start_offsets)); 2092 2093 /* enable auto inc in case it is disabled */ 2094 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL)); 2095 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; 2096 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp); 2097 2098 /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */ 2099 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR), 2100 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET); 2101 for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++) 2102 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA), 2103 adev->gfx.rlc.register_restore[i]); 2104 2105 /* load indirect register */ 2106 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR), 2107 adev->gfx.rlc.reg_list_format_start); 2108 2109 /* direct register portion */ 2110 for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++) 2111 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), 2112 register_list_format[i]); 2113 2114 /* indirect register portion */ 2115 while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) { 2116 if (register_list_format[i] == 0xFFFFFFFF) { 2117 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]); 2118 continue; 2119 } 2120 2121 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]); 2122 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]); 2123 2124 for (j = 0; j < unique_indirect_reg_count; j++) { 2125 if (register_list_format[i] == unique_indirect_regs[j]) { 2126 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j); 2127 break; 2128 } 2129 } 2130 2131 BUG_ON(j >= unique_indirect_reg_count); 2132 2133 i++; 2134 } 2135 2136 /* set save/restore list size */ 2137 list_size = adev->gfx.rlc.reg_list_size_bytes >> 2; 2138 list_size = list_size >> 1; 2139 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR), 2140 adev->gfx.rlc.reg_restore_list_size); 2141 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size); 2142 2143 /* write the starting offsets to RLC scratch ram */ 2144 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR), 2145 adev->gfx.rlc.starting_offsets_start); 2146 for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++) 2147 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), 2148 indirect_start_offsets[i]); 2149 2150 /* load unique indirect regs*/ 2151 for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) { 2152 if (unique_indirect_regs[i] != 0) { 2153 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) 2154 + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i], 2155 unique_indirect_regs[i] & 0x3FFFF); 2156 2157 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) 2158 + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i], 2159 unique_indirect_regs[i] >> 20); 2160 } 2161 } 2162 2163 kfree(register_list_format); 2164 return 0; 2165 } 2166 2167 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev) 2168 { 2169 WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1); 2170 } 2171 2172 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev, 2173 bool enable) 2174 { 2175 uint32_t data = 0; 2176 uint32_t default_data = 0; 2177 2178 default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS)); 2179 if (enable == true) { 2180 /* enable GFXIP control over CGPG */ 2181 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK; 2182 if(default_data != data) 2183 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data); 2184 2185 /* update status */ 2186 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK; 2187 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT); 2188 if(default_data != data) 2189 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data); 2190 } else { 2191 /* restore GFXIP control over GCPG */ 2192 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK; 2193 if(default_data != data) 2194 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data); 2195 } 2196 } 2197 2198 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev) 2199 { 2200 uint32_t data = 0; 2201 2202 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | 2203 AMD_PG_SUPPORT_GFX_SMG | 2204 AMD_PG_SUPPORT_GFX_DMG)) { 2205 /* init IDLE_POLL_COUNT = 60 */ 2206 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL)); 2207 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK; 2208 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 2209 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data); 2210 2211 /* init RLC PG Delay */ 2212 data = 0; 2213 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT); 2214 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT); 2215 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT); 2216 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT); 2217 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data); 2218 2219 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2)); 2220 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK; 2221 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT); 2222 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data); 2223 2224 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3)); 2225 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK; 2226 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT); 2227 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data); 2228 2229 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL)); 2230 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK; 2231 2232 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */ 2233 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT); 2234 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data); 2235 2236 pwr_10_0_gfxip_control_over_cgpg(adev, true); 2237 } 2238 } 2239 2240 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev, 2241 bool enable) 2242 { 2243 uint32_t data = 0; 2244 uint32_t default_data = 0; 2245 2246 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); 2247 data = REG_SET_FIELD(data, RLC_PG_CNTL, 2248 SMU_CLK_SLOWDOWN_ON_PU_ENABLE, 2249 enable ? 1 : 0); 2250 if (default_data != data) 2251 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); 2252 } 2253 2254 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev, 2255 bool enable) 2256 { 2257 uint32_t data = 0; 2258 uint32_t default_data = 0; 2259 2260 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); 2261 data = REG_SET_FIELD(data, RLC_PG_CNTL, 2262 SMU_CLK_SLOWDOWN_ON_PD_ENABLE, 2263 enable ? 1 : 0); 2264 if(default_data != data) 2265 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); 2266 } 2267 2268 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev, 2269 bool enable) 2270 { 2271 uint32_t data = 0; 2272 uint32_t default_data = 0; 2273 2274 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); 2275 data = REG_SET_FIELD(data, RLC_PG_CNTL, 2276 CP_PG_DISABLE, 2277 enable ? 0 : 1); 2278 if(default_data != data) 2279 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); 2280 } 2281 2282 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev, 2283 bool enable) 2284 { 2285 uint32_t data, default_data; 2286 2287 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); 2288 data = REG_SET_FIELD(data, RLC_PG_CNTL, 2289 GFX_POWER_GATING_ENABLE, 2290 enable ? 1 : 0); 2291 if(default_data != data) 2292 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); 2293 } 2294 2295 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev, 2296 bool enable) 2297 { 2298 uint32_t data, default_data; 2299 2300 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); 2301 data = REG_SET_FIELD(data, RLC_PG_CNTL, 2302 GFX_PIPELINE_PG_ENABLE, 2303 enable ? 1 : 0); 2304 if(default_data != data) 2305 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); 2306 2307 if (!enable) 2308 /* read any GFX register to wake up GFX */ 2309 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL)); 2310 } 2311 2312 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, 2313 bool enable) 2314 { 2315 uint32_t data, default_data; 2316 2317 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); 2318 data = REG_SET_FIELD(data, RLC_PG_CNTL, 2319 STATIC_PER_CU_PG_ENABLE, 2320 enable ? 1 : 0); 2321 if(default_data != data) 2322 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); 2323 } 2324 2325 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev, 2326 bool enable) 2327 { 2328 uint32_t data, default_data; 2329 2330 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); 2331 data = REG_SET_FIELD(data, RLC_PG_CNTL, 2332 DYN_PER_CU_PG_ENABLE, 2333 enable ? 1 : 0); 2334 if(default_data != data) 2335 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); 2336 } 2337 2338 static void gfx_v9_0_init_pg(struct amdgpu_device *adev) 2339 { 2340 gfx_v9_0_init_csb(adev); 2341 2342 /* 2343 * Rlc save restore list is workable since v2_1. 2344 * And it's needed by gfxoff feature. 2345 */ 2346 if (adev->gfx.rlc.is_rlc_v2_1) { 2347 gfx_v9_1_init_rlc_save_restore_list(adev); 2348 gfx_v9_0_enable_save_restore_machine(adev); 2349 } 2350 2351 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | 2352 AMD_PG_SUPPORT_GFX_SMG | 2353 AMD_PG_SUPPORT_GFX_DMG | 2354 AMD_PG_SUPPORT_CP | 2355 AMD_PG_SUPPORT_GDS | 2356 AMD_PG_SUPPORT_RLC_SMU_HS)) { 2357 WREG32(mmRLC_JUMP_TABLE_RESTORE, 2358 adev->gfx.rlc.cp_table_gpu_addr >> 8); 2359 gfx_v9_0_init_gfx_power_gating(adev); 2360 } 2361 } 2362 2363 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev) 2364 { 2365 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0); 2366 gfx_v9_0_enable_gui_idle_interrupt(adev, false); 2367 gfx_v9_0_wait_for_rlc_serdes(adev); 2368 } 2369 2370 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev) 2371 { 2372 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); 2373 udelay(50); 2374 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); 2375 udelay(50); 2376 } 2377 2378 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev) 2379 { 2380 #ifdef AMDGPU_RLC_DEBUG_RETRY 2381 u32 rlc_ucode_ver; 2382 #endif 2383 2384 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); 2385 udelay(50); 2386 2387 /* carrizo do enable cp interrupt after cp inited */ 2388 if (!(adev->flags & AMD_IS_APU)) { 2389 gfx_v9_0_enable_gui_idle_interrupt(adev, true); 2390 udelay(50); 2391 } 2392 2393 #ifdef AMDGPU_RLC_DEBUG_RETRY 2394 /* RLC_GPM_GENERAL_6 : RLC Ucode version */ 2395 rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6); 2396 if(rlc_ucode_ver == 0x108) { 2397 DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n", 2398 rlc_ucode_ver, adev->gfx.rlc_fw_version); 2399 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles, 2400 * default is 0x9C4 to create a 100us interval */ 2401 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4); 2402 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr 2403 * to disable the page fault retry interrupts, default is 2404 * 0x100 (256) */ 2405 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100); 2406 } 2407 #endif 2408 } 2409 2410 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev) 2411 { 2412 const struct rlc_firmware_header_v2_0 *hdr; 2413 const __le32 *fw_data; 2414 unsigned i, fw_size; 2415 2416 if (!adev->gfx.rlc_fw) 2417 return -EINVAL; 2418 2419 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 2420 amdgpu_ucode_print_rlc_hdr(&hdr->header); 2421 2422 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2423 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2424 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 2425 2426 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, 2427 RLCG_UCODE_LOADING_START_ADDRESS); 2428 for (i = 0; i < fw_size; i++) 2429 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); 2430 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); 2431 2432 return 0; 2433 } 2434 2435 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) 2436 { 2437 int r; 2438 2439 if (amdgpu_sriov_vf(adev)) { 2440 gfx_v9_0_init_csb(adev); 2441 return 0; 2442 } 2443 2444 adev->gfx.rlc.funcs->stop(adev); 2445 2446 /* disable CG */ 2447 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0); 2448 2449 gfx_v9_0_init_pg(adev); 2450 2451 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 2452 /* legacy rlc firmware loading */ 2453 r = gfx_v9_0_rlc_load_microcode(adev); 2454 if (r) 2455 return r; 2456 } 2457 2458 switch (adev->asic_type) { 2459 case CHIP_RAVEN: 2460 if (amdgpu_lbpw == 0) 2461 gfx_v9_0_enable_lbpw(adev, false); 2462 else 2463 gfx_v9_0_enable_lbpw(adev, true); 2464 break; 2465 case CHIP_VEGA20: 2466 if (amdgpu_lbpw > 0) 2467 gfx_v9_0_enable_lbpw(adev, true); 2468 else 2469 gfx_v9_0_enable_lbpw(adev, false); 2470 break; 2471 default: 2472 break; 2473 } 2474 2475 adev->gfx.rlc.funcs->start(adev); 2476 2477 return 0; 2478 } 2479 2480 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) 2481 { 2482 int i; 2483 u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL); 2484 2485 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1); 2486 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); 2487 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1); 2488 if (!enable) { 2489 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 2490 adev->gfx.gfx_ring[i].sched.ready = false; 2491 } 2492 WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); 2493 udelay(50); 2494 } 2495 2496 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev) 2497 { 2498 const struct gfx_firmware_header_v1_0 *pfp_hdr; 2499 const struct gfx_firmware_header_v1_0 *ce_hdr; 2500 const struct gfx_firmware_header_v1_0 *me_hdr; 2501 const __le32 *fw_data; 2502 unsigned i, fw_size; 2503 2504 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw) 2505 return -EINVAL; 2506 2507 pfp_hdr = (const struct gfx_firmware_header_v1_0 *) 2508 adev->gfx.pfp_fw->data; 2509 ce_hdr = (const struct gfx_firmware_header_v1_0 *) 2510 adev->gfx.ce_fw->data; 2511 me_hdr = (const struct gfx_firmware_header_v1_0 *) 2512 adev->gfx.me_fw->data; 2513 2514 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 2515 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); 2516 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 2517 2518 gfx_v9_0_cp_gfx_enable(adev, false); 2519 2520 /* PFP */ 2521 fw_data = (const __le32 *) 2522 (adev->gfx.pfp_fw->data + 2523 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); 2524 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4; 2525 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0); 2526 for (i = 0; i < fw_size; i++) 2527 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++)); 2528 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version); 2529 2530 /* CE */ 2531 fw_data = (const __le32 *) 2532 (adev->gfx.ce_fw->data + 2533 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes)); 2534 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4; 2535 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0); 2536 for (i = 0; i < fw_size; i++) 2537 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++)); 2538 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version); 2539 2540 /* ME */ 2541 fw_data = (const __le32 *) 2542 (adev->gfx.me_fw->data + 2543 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); 2544 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4; 2545 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0); 2546 for (i = 0; i < fw_size; i++) 2547 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++)); 2548 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version); 2549 2550 return 0; 2551 } 2552 2553 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev) 2554 { 2555 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0]; 2556 const struct cs_section_def *sect = NULL; 2557 const struct cs_extent_def *ext = NULL; 2558 int r, i, tmp; 2559 2560 /* init the CP */ 2561 WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1); 2562 WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1); 2563 2564 gfx_v9_0_cp_gfx_enable(adev, true); 2565 2566 r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3); 2567 if (r) { 2568 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 2569 return r; 2570 } 2571 2572 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 2573 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); 2574 2575 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 2576 amdgpu_ring_write(ring, 0x80000000); 2577 amdgpu_ring_write(ring, 0x80000000); 2578 2579 for (sect = gfx9_cs_data; sect->section != NULL; ++sect) { 2580 for (ext = sect->section; ext->extent != NULL; ++ext) { 2581 if (sect->id == SECT_CONTEXT) { 2582 amdgpu_ring_write(ring, 2583 PACKET3(PACKET3_SET_CONTEXT_REG, 2584 ext->reg_count)); 2585 amdgpu_ring_write(ring, 2586 ext->reg_index - PACKET3_SET_CONTEXT_REG_START); 2587 for (i = 0; i < ext->reg_count; i++) 2588 amdgpu_ring_write(ring, ext->extent[i]); 2589 } 2590 } 2591 } 2592 2593 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 2594 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); 2595 2596 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 2597 amdgpu_ring_write(ring, 0); 2598 2599 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); 2600 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); 2601 amdgpu_ring_write(ring, 0x8000); 2602 amdgpu_ring_write(ring, 0x8000); 2603 2604 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG,1)); 2605 tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE | 2606 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START)); 2607 amdgpu_ring_write(ring, tmp); 2608 amdgpu_ring_write(ring, 0); 2609 2610 amdgpu_ring_commit(ring); 2611 2612 return 0; 2613 } 2614 2615 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev) 2616 { 2617 struct amdgpu_ring *ring; 2618 u32 tmp; 2619 u32 rb_bufsz; 2620 u64 rb_addr, rptr_addr, wptr_gpu_addr; 2621 2622 /* Set the write pointer delay */ 2623 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0); 2624 2625 /* set the RB to use vmid 0 */ 2626 WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0); 2627 2628 /* Set ring buffer size */ 2629 ring = &adev->gfx.gfx_ring[0]; 2630 rb_bufsz = order_base_2(ring->ring_size / 8); 2631 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz); 2632 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2); 2633 #ifdef __BIG_ENDIAN 2634 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1); 2635 #endif 2636 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp); 2637 2638 /* Initialize the ring buffer's write pointers */ 2639 ring->wptr = 0; 2640 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); 2641 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); 2642 2643 /* set the wb address wether it's enabled or not */ 2644 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); 2645 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); 2646 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); 2647 2648 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); 2649 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr)); 2650 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr)); 2651 2652 mdelay(1); 2653 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp); 2654 2655 rb_addr = ring->gpu_addr >> 8; 2656 WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr); 2657 WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr)); 2658 2659 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL); 2660 if (ring->use_doorbell) { 2661 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 2662 DOORBELL_OFFSET, ring->doorbell_index); 2663 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, 2664 DOORBELL_EN, 1); 2665 } else { 2666 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0); 2667 } 2668 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp); 2669 2670 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, 2671 DOORBELL_RANGE_LOWER, ring->doorbell_index); 2672 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp); 2673 2674 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER, 2675 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK); 2676 2677 2678 /* start the ring */ 2679 gfx_v9_0_cp_gfx_start(adev); 2680 ring->sched.ready = true; 2681 2682 return 0; 2683 } 2684 2685 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) 2686 { 2687 int i; 2688 2689 if (enable) { 2690 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0); 2691 } else { 2692 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 2693 (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); 2694 for (i = 0; i < adev->gfx.num_compute_rings; i++) 2695 adev->gfx.compute_ring[i].sched.ready = false; 2696 adev->gfx.kiq.ring.sched.ready = false; 2697 } 2698 udelay(50); 2699 } 2700 2701 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev) 2702 { 2703 const struct gfx_firmware_header_v1_0 *mec_hdr; 2704 const __le32 *fw_data; 2705 unsigned i; 2706 u32 tmp; 2707 2708 if (!adev->gfx.mec_fw) 2709 return -EINVAL; 2710 2711 gfx_v9_0_cp_compute_enable(adev, false); 2712 2713 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 2714 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 2715 2716 fw_data = (const __le32 *) 2717 (adev->gfx.mec_fw->data + 2718 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); 2719 tmp = 0; 2720 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); 2721 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 2722 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp); 2723 2724 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO, 2725 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000); 2726 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI, 2727 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); 2728 2729 /* MEC1 */ 2730 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, 2731 mec_hdr->jt_offset); 2732 for (i = 0; i < mec_hdr->jt_size; i++) 2733 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA, 2734 le32_to_cpup(fw_data + mec_hdr->jt_offset + i)); 2735 2736 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, 2737 adev->gfx.mec_fw_version); 2738 /* Todo : Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */ 2739 2740 return 0; 2741 } 2742 2743 /* KIQ functions */ 2744 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring) 2745 { 2746 uint32_t tmp; 2747 struct amdgpu_device *adev = ring->adev; 2748 2749 /* tell RLC which is KIQ queue */ 2750 tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS); 2751 tmp &= 0xffffff00; 2752 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); 2753 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp); 2754 tmp |= 0x80; 2755 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp); 2756 } 2757 2758 static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev) 2759 { 2760 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; 2761 uint64_t queue_mask = 0; 2762 int r, i; 2763 2764 for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) { 2765 if (!test_bit(i, adev->gfx.mec.queue_bitmap)) 2766 continue; 2767 2768 /* This situation may be hit in the future if a new HW 2769 * generation exposes more than 64 queues. If so, the 2770 * definition of queue_mask needs updating */ 2771 if (WARN_ON(i >= (sizeof(queue_mask)*8))) { 2772 DRM_ERROR("Invalid KCQ enabled: %d\n", i); 2773 break; 2774 } 2775 2776 queue_mask |= (1ull << i); 2777 } 2778 2779 r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8); 2780 if (r) { 2781 DRM_ERROR("Failed to lock KIQ (%d).\n", r); 2782 return r; 2783 } 2784 2785 /* set resources */ 2786 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); 2787 amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | 2788 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ 2789 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ 2790 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ 2791 amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ 2792 amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ 2793 amdgpu_ring_write(kiq_ring, 0); /* oac mask */ 2794 amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ 2795 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 2796 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; 2797 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); 2798 uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); 2799 2800 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); 2801 /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/ 2802 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 2803 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ 2804 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */ 2805 PACKET3_MAP_QUEUES_QUEUE(ring->queue) | 2806 PACKET3_MAP_QUEUES_PIPE(ring->pipe) | 2807 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) | 2808 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ 2809 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ 2810 PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */ 2811 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ 2812 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index)); 2813 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr)); 2814 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr)); 2815 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); 2816 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); 2817 } 2818 2819 r = amdgpu_ring_test_helper(kiq_ring); 2820 if (r) 2821 DRM_ERROR("KCQ enable failed\n"); 2822 2823 return r; 2824 } 2825 2826 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring) 2827 { 2828 struct amdgpu_device *adev = ring->adev; 2829 struct v9_mqd *mqd = ring->mqd_ptr; 2830 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; 2831 uint32_t tmp; 2832 2833 mqd->header = 0xC0310800; 2834 mqd->compute_pipelinestat_enable = 0x00000001; 2835 mqd->compute_static_thread_mgmt_se0 = 0xffffffff; 2836 mqd->compute_static_thread_mgmt_se1 = 0xffffffff; 2837 mqd->compute_static_thread_mgmt_se2 = 0xffffffff; 2838 mqd->compute_static_thread_mgmt_se3 = 0xffffffff; 2839 mqd->compute_misc_reserved = 0x00000003; 2840 2841 mqd->dynamic_cu_mask_addr_lo = 2842 lower_32_bits(ring->mqd_gpu_addr 2843 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); 2844 mqd->dynamic_cu_mask_addr_hi = 2845 upper_32_bits(ring->mqd_gpu_addr 2846 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); 2847 2848 eop_base_addr = ring->eop_gpu_addr >> 8; 2849 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; 2850 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); 2851 2852 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 2853 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL); 2854 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, 2855 (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1)); 2856 2857 mqd->cp_hqd_eop_control = tmp; 2858 2859 /* enable doorbell? */ 2860 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL); 2861 2862 if (ring->use_doorbell) { 2863 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2864 DOORBELL_OFFSET, ring->doorbell_index); 2865 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2866 DOORBELL_EN, 1); 2867 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2868 DOORBELL_SOURCE, 0); 2869 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2870 DOORBELL_HIT, 0); 2871 } else { 2872 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2873 DOORBELL_EN, 0); 2874 } 2875 2876 mqd->cp_hqd_pq_doorbell_control = tmp; 2877 2878 /* disable the queue if it's active */ 2879 ring->wptr = 0; 2880 mqd->cp_hqd_dequeue_request = 0; 2881 mqd->cp_hqd_pq_rptr = 0; 2882 mqd->cp_hqd_pq_wptr_lo = 0; 2883 mqd->cp_hqd_pq_wptr_hi = 0; 2884 2885 /* set the pointer to the MQD */ 2886 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc; 2887 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr); 2888 2889 /* set MQD vmid to 0 */ 2890 tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL); 2891 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); 2892 mqd->cp_mqd_control = tmp; 2893 2894 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 2895 hqd_gpu_addr = ring->gpu_addr >> 8; 2896 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; 2897 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); 2898 2899 /* set up the HQD, this is similar to CP_RB0_CNTL */ 2900 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL); 2901 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, 2902 (order_base_2(ring->ring_size / 4) - 1)); 2903 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, 2904 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8)); 2905 #ifdef __BIG_ENDIAN 2906 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1); 2907 #endif 2908 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0); 2909 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0); 2910 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); 2911 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); 2912 mqd->cp_hqd_pq_control = tmp; 2913 2914 /* set the wb address whether it's enabled or not */ 2915 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); 2916 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; 2917 mqd->cp_hqd_pq_rptr_report_addr_hi = 2918 upper_32_bits(wb_gpu_addr) & 0xffff; 2919 2920 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 2921 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); 2922 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 2923 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 2924 2925 tmp = 0; 2926 /* enable the doorbell if requested */ 2927 if (ring->use_doorbell) { 2928 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL); 2929 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2930 DOORBELL_OFFSET, ring->doorbell_index); 2931 2932 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2933 DOORBELL_EN, 1); 2934 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2935 DOORBELL_SOURCE, 0); 2936 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 2937 DOORBELL_HIT, 0); 2938 } 2939 2940 mqd->cp_hqd_pq_doorbell_control = tmp; 2941 2942 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 2943 ring->wptr = 0; 2944 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR); 2945 2946 /* set the vmid for the queue */ 2947 mqd->cp_hqd_vmid = 0; 2948 2949 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE); 2950 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); 2951 mqd->cp_hqd_persistent_state = tmp; 2952 2953 /* set MIN_IB_AVAIL_SIZE */ 2954 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL); 2955 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); 2956 mqd->cp_hqd_ib_control = tmp; 2957 2958 /* activate the queue */ 2959 mqd->cp_hqd_active = 1; 2960 2961 return 0; 2962 } 2963 2964 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring) 2965 { 2966 struct amdgpu_device *adev = ring->adev; 2967 struct v9_mqd *mqd = ring->mqd_ptr; 2968 int j; 2969 2970 /* disable wptr polling */ 2971 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); 2972 2973 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR, 2974 mqd->cp_hqd_eop_base_addr_lo); 2975 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI, 2976 mqd->cp_hqd_eop_base_addr_hi); 2977 2978 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 2979 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL, 2980 mqd->cp_hqd_eop_control); 2981 2982 /* enable doorbell? */ 2983 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 2984 mqd->cp_hqd_pq_doorbell_control); 2985 2986 /* disable the queue if it's active */ 2987 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) { 2988 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1); 2989 for (j = 0; j < adev->usec_timeout; j++) { 2990 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1)) 2991 break; 2992 udelay(1); 2993 } 2994 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 2995 mqd->cp_hqd_dequeue_request); 2996 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 2997 mqd->cp_hqd_pq_rptr); 2998 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 2999 mqd->cp_hqd_pq_wptr_lo); 3000 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 3001 mqd->cp_hqd_pq_wptr_hi); 3002 } 3003 3004 /* set the pointer to the MQD */ 3005 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR, 3006 mqd->cp_mqd_base_addr_lo); 3007 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI, 3008 mqd->cp_mqd_base_addr_hi); 3009 3010 /* set MQD vmid to 0 */ 3011 WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL, 3012 mqd->cp_mqd_control); 3013 3014 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 3015 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE, 3016 mqd->cp_hqd_pq_base_lo); 3017 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI, 3018 mqd->cp_hqd_pq_base_hi); 3019 3020 /* set up the HQD, this is similar to CP_RB0_CNTL */ 3021 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL, 3022 mqd->cp_hqd_pq_control); 3023 3024 /* set the wb address whether it's enabled or not */ 3025 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR, 3026 mqd->cp_hqd_pq_rptr_report_addr_lo); 3027 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI, 3028 mqd->cp_hqd_pq_rptr_report_addr_hi); 3029 3030 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 3031 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR, 3032 mqd->cp_hqd_pq_wptr_poll_addr_lo); 3033 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, 3034 mqd->cp_hqd_pq_wptr_poll_addr_hi); 3035 3036 /* enable the doorbell if requested */ 3037 if (ring->use_doorbell) { 3038 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER, 3039 (adev->doorbell_index.kiq * 2) << 2); 3040 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER, 3041 (adev->doorbell_index.userqueue_end * 2) << 2); 3042 } 3043 3044 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 3045 mqd->cp_hqd_pq_doorbell_control); 3046 3047 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 3048 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 3049 mqd->cp_hqd_pq_wptr_lo); 3050 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 3051 mqd->cp_hqd_pq_wptr_hi); 3052 3053 /* set the vmid for the queue */ 3054 WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid); 3055 3056 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 3057 mqd->cp_hqd_persistent_state); 3058 3059 /* activate the queue */ 3060 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 3061 mqd->cp_hqd_active); 3062 3063 if (ring->use_doorbell) 3064 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1); 3065 3066 return 0; 3067 } 3068 3069 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring) 3070 { 3071 struct amdgpu_device *adev = ring->adev; 3072 int j; 3073 3074 /* disable the queue if it's active */ 3075 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) { 3076 3077 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1); 3078 3079 for (j = 0; j < adev->usec_timeout; j++) { 3080 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1)) 3081 break; 3082 udelay(1); 3083 } 3084 3085 if (j == AMDGPU_MAX_USEC_TIMEOUT) { 3086 DRM_DEBUG("KIQ dequeue request failed.\n"); 3087 3088 /* Manual disable if dequeue request times out */ 3089 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0); 3090 } 3091 3092 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 3093 0); 3094 } 3095 3096 WREG32_SOC15(GC, 0, mmCP_HQD_IQ_TIMER, 0); 3097 WREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL, 0); 3098 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0); 3099 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000); 3100 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0); 3101 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 0); 3102 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0); 3103 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0); 3104 3105 return 0; 3106 } 3107 3108 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring) 3109 { 3110 struct amdgpu_device *adev = ring->adev; 3111 struct v9_mqd *mqd = ring->mqd_ptr; 3112 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; 3113 3114 gfx_v9_0_kiq_setting(ring); 3115 3116 if (adev->in_gpu_reset) { /* for GPU_RESET case */ 3117 /* reset MQD to a clean status */ 3118 if (adev->gfx.mec.mqd_backup[mqd_idx]) 3119 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); 3120 3121 /* reset ring buffer */ 3122 ring->wptr = 0; 3123 amdgpu_ring_clear_ring(ring); 3124 3125 mutex_lock(&adev->srbm_mutex); 3126 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3127 gfx_v9_0_kiq_init_register(ring); 3128 soc15_grbm_select(adev, 0, 0, 0, 0); 3129 mutex_unlock(&adev->srbm_mutex); 3130 } else { 3131 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); 3132 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; 3133 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; 3134 mutex_lock(&adev->srbm_mutex); 3135 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3136 gfx_v9_0_mqd_init(ring); 3137 gfx_v9_0_kiq_init_register(ring); 3138 soc15_grbm_select(adev, 0, 0, 0, 0); 3139 mutex_unlock(&adev->srbm_mutex); 3140 3141 if (adev->gfx.mec.mqd_backup[mqd_idx]) 3142 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation)); 3143 } 3144 3145 return 0; 3146 } 3147 3148 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring) 3149 { 3150 struct amdgpu_device *adev = ring->adev; 3151 struct v9_mqd *mqd = ring->mqd_ptr; 3152 int mqd_idx = ring - &adev->gfx.compute_ring[0]; 3153 3154 if (!adev->in_gpu_reset && !adev->in_suspend) { 3155 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); 3156 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; 3157 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; 3158 mutex_lock(&adev->srbm_mutex); 3159 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3160 gfx_v9_0_mqd_init(ring); 3161 soc15_grbm_select(adev, 0, 0, 0, 0); 3162 mutex_unlock(&adev->srbm_mutex); 3163 3164 if (adev->gfx.mec.mqd_backup[mqd_idx]) 3165 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation)); 3166 } else if (adev->in_gpu_reset) { /* for GPU_RESET case */ 3167 /* reset MQD to a clean status */ 3168 if (adev->gfx.mec.mqd_backup[mqd_idx]) 3169 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); 3170 3171 /* reset ring buffer */ 3172 ring->wptr = 0; 3173 amdgpu_ring_clear_ring(ring); 3174 } else { 3175 amdgpu_ring_clear_ring(ring); 3176 } 3177 3178 return 0; 3179 } 3180 3181 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev) 3182 { 3183 struct amdgpu_ring *ring; 3184 int r; 3185 3186 ring = &adev->gfx.kiq.ring; 3187 3188 r = amdgpu_bo_reserve(ring->mqd_obj, false); 3189 if (unlikely(r != 0)) 3190 return r; 3191 3192 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3193 if (unlikely(r != 0)) 3194 return r; 3195 3196 gfx_v9_0_kiq_init_queue(ring); 3197 amdgpu_bo_kunmap(ring->mqd_obj); 3198 ring->mqd_ptr = NULL; 3199 amdgpu_bo_unreserve(ring->mqd_obj); 3200 ring->sched.ready = true; 3201 return 0; 3202 } 3203 3204 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev) 3205 { 3206 struct amdgpu_ring *ring = NULL; 3207 int r = 0, i; 3208 3209 gfx_v9_0_cp_compute_enable(adev, true); 3210 3211 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3212 ring = &adev->gfx.compute_ring[i]; 3213 3214 r = amdgpu_bo_reserve(ring->mqd_obj, false); 3215 if (unlikely(r != 0)) 3216 goto done; 3217 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3218 if (!r) { 3219 r = gfx_v9_0_kcq_init_queue(ring); 3220 amdgpu_bo_kunmap(ring->mqd_obj); 3221 ring->mqd_ptr = NULL; 3222 } 3223 amdgpu_bo_unreserve(ring->mqd_obj); 3224 if (r) 3225 goto done; 3226 } 3227 3228 r = gfx_v9_0_kiq_kcq_enable(adev); 3229 done: 3230 return r; 3231 } 3232 3233 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) 3234 { 3235 int r, i; 3236 struct amdgpu_ring *ring; 3237 3238 if (!(adev->flags & AMD_IS_APU)) 3239 gfx_v9_0_enable_gui_idle_interrupt(adev, false); 3240 3241 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 3242 /* legacy firmware loading */ 3243 r = gfx_v9_0_cp_gfx_load_microcode(adev); 3244 if (r) 3245 return r; 3246 3247 r = gfx_v9_0_cp_compute_load_microcode(adev); 3248 if (r) 3249 return r; 3250 } 3251 3252 r = gfx_v9_0_kiq_resume(adev); 3253 if (r) 3254 return r; 3255 3256 r = gfx_v9_0_cp_gfx_resume(adev); 3257 if (r) 3258 return r; 3259 3260 r = gfx_v9_0_kcq_resume(adev); 3261 if (r) 3262 return r; 3263 3264 ring = &adev->gfx.gfx_ring[0]; 3265 r = amdgpu_ring_test_helper(ring); 3266 if (r) 3267 return r; 3268 3269 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3270 ring = &adev->gfx.compute_ring[i]; 3271 amdgpu_ring_test_helper(ring); 3272 } 3273 3274 gfx_v9_0_enable_gui_idle_interrupt(adev, true); 3275 3276 return 0; 3277 } 3278 3279 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable) 3280 { 3281 gfx_v9_0_cp_gfx_enable(adev, enable); 3282 gfx_v9_0_cp_compute_enable(adev, enable); 3283 } 3284 3285 static int gfx_v9_0_hw_init(void *handle) 3286 { 3287 int r; 3288 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3289 3290 gfx_v9_0_init_golden_registers(adev); 3291 3292 gfx_v9_0_constants_init(adev); 3293 3294 r = gfx_v9_0_csb_vram_pin(adev); 3295 if (r) 3296 return r; 3297 3298 r = adev->gfx.rlc.funcs->resume(adev); 3299 if (r) 3300 return r; 3301 3302 r = gfx_v9_0_cp_resume(adev); 3303 if (r) 3304 return r; 3305 3306 r = gfx_v9_0_ngg_en(adev); 3307 if (r) 3308 return r; 3309 3310 return r; 3311 } 3312 3313 static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev) 3314 { 3315 int r, i; 3316 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; 3317 3318 r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings); 3319 if (r) 3320 DRM_ERROR("Failed to lock KIQ (%d).\n", r); 3321 3322 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3323 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; 3324 3325 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); 3326 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ 3327 PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */ 3328 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) | 3329 PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) | 3330 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1)); 3331 amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); 3332 amdgpu_ring_write(kiq_ring, 0); 3333 amdgpu_ring_write(kiq_ring, 0); 3334 amdgpu_ring_write(kiq_ring, 0); 3335 } 3336 r = amdgpu_ring_test_helper(kiq_ring); 3337 if (r) 3338 DRM_ERROR("KCQ disable failed\n"); 3339 3340 return r; 3341 } 3342 3343 static int gfx_v9_0_hw_fini(void *handle) 3344 { 3345 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3346 3347 amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0); 3348 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); 3349 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); 3350 3351 /* disable KCQ to avoid CPC touch memory not valid anymore */ 3352 gfx_v9_0_kcq_disable(adev); 3353 3354 if (amdgpu_sriov_vf(adev)) { 3355 gfx_v9_0_cp_gfx_enable(adev, false); 3356 /* must disable polling for SRIOV when hw finished, otherwise 3357 * CPC engine may still keep fetching WB address which is already 3358 * invalid after sw finished and trigger DMAR reading error in 3359 * hypervisor side. 3360 */ 3361 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); 3362 return 0; 3363 } 3364 3365 /* Use deinitialize sequence from CAIL when unbinding device from driver, 3366 * otherwise KIQ is hanging when binding back 3367 */ 3368 if (!adev->in_gpu_reset && !adev->in_suspend) { 3369 mutex_lock(&adev->srbm_mutex); 3370 soc15_grbm_select(adev, adev->gfx.kiq.ring.me, 3371 adev->gfx.kiq.ring.pipe, 3372 adev->gfx.kiq.ring.queue, 0); 3373 gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring); 3374 soc15_grbm_select(adev, 0, 0, 0, 0); 3375 mutex_unlock(&adev->srbm_mutex); 3376 } 3377 3378 gfx_v9_0_cp_enable(adev, false); 3379 adev->gfx.rlc.funcs->stop(adev); 3380 3381 gfx_v9_0_csb_vram_unpin(adev); 3382 3383 return 0; 3384 } 3385 3386 static int gfx_v9_0_suspend(void *handle) 3387 { 3388 return gfx_v9_0_hw_fini(handle); 3389 } 3390 3391 static int gfx_v9_0_resume(void *handle) 3392 { 3393 return gfx_v9_0_hw_init(handle); 3394 } 3395 3396 static bool gfx_v9_0_is_idle(void *handle) 3397 { 3398 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3399 3400 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS), 3401 GRBM_STATUS, GUI_ACTIVE)) 3402 return false; 3403 else 3404 return true; 3405 } 3406 3407 static int gfx_v9_0_wait_for_idle(void *handle) 3408 { 3409 unsigned i; 3410 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3411 3412 for (i = 0; i < adev->usec_timeout; i++) { 3413 if (gfx_v9_0_is_idle(handle)) 3414 return 0; 3415 udelay(1); 3416 } 3417 return -ETIMEDOUT; 3418 } 3419 3420 static int gfx_v9_0_soft_reset(void *handle) 3421 { 3422 u32 grbm_soft_reset = 0; 3423 u32 tmp; 3424 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3425 3426 /* GRBM_STATUS */ 3427 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS); 3428 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | 3429 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | 3430 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | 3431 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | 3432 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | 3433 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) { 3434 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 3435 GRBM_SOFT_RESET, SOFT_RESET_CP, 1); 3436 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 3437 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1); 3438 } 3439 3440 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { 3441 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 3442 GRBM_SOFT_RESET, SOFT_RESET_CP, 1); 3443 } 3444 3445 /* GRBM_STATUS2 */ 3446 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2); 3447 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY)) 3448 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 3449 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); 3450 3451 3452 if (grbm_soft_reset) { 3453 /* stop the rlc */ 3454 adev->gfx.rlc.funcs->stop(adev); 3455 3456 /* Disable GFX parsing/prefetching */ 3457 gfx_v9_0_cp_gfx_enable(adev, false); 3458 3459 /* Disable MEC parsing/prefetching */ 3460 gfx_v9_0_cp_compute_enable(adev, false); 3461 3462 if (grbm_soft_reset) { 3463 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); 3464 tmp |= grbm_soft_reset; 3465 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); 3466 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); 3467 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); 3468 3469 udelay(50); 3470 3471 tmp &= ~grbm_soft_reset; 3472 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); 3473 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); 3474 } 3475 3476 /* Wait a little for things to settle down */ 3477 udelay(50); 3478 } 3479 return 0; 3480 } 3481 3482 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) 3483 { 3484 uint64_t clock; 3485 3486 mutex_lock(&adev->gfx.gpu_clock_mutex); 3487 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); 3488 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | 3489 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); 3490 mutex_unlock(&adev->gfx.gpu_clock_mutex); 3491 return clock; 3492 } 3493 3494 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring, 3495 uint32_t vmid, 3496 uint32_t gds_base, uint32_t gds_size, 3497 uint32_t gws_base, uint32_t gws_size, 3498 uint32_t oa_base, uint32_t oa_size) 3499 { 3500 struct amdgpu_device *adev = ring->adev; 3501 3502 /* GDS Base */ 3503 gfx_v9_0_write_data_to_reg(ring, 0, false, 3504 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid, 3505 gds_base); 3506 3507 /* GDS Size */ 3508 gfx_v9_0_write_data_to_reg(ring, 0, false, 3509 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid, 3510 gds_size); 3511 3512 /* GWS */ 3513 gfx_v9_0_write_data_to_reg(ring, 0, false, 3514 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid, 3515 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); 3516 3517 /* OA */ 3518 gfx_v9_0_write_data_to_reg(ring, 0, false, 3519 SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid, 3520 (1 << (oa_size + oa_base)) - (1 << oa_base)); 3521 } 3522 3523 static int gfx_v9_0_early_init(void *handle) 3524 { 3525 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3526 3527 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS; 3528 adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS; 3529 gfx_v9_0_set_ring_funcs(adev); 3530 gfx_v9_0_set_irq_funcs(adev); 3531 gfx_v9_0_set_gds_init(adev); 3532 gfx_v9_0_set_rlc_funcs(adev); 3533 3534 return 0; 3535 } 3536 3537 static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev, 3538 struct amdgpu_iv_entry *entry); 3539 3540 static int gfx_v9_0_ecc_late_init(void *handle) 3541 { 3542 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3543 struct ras_common_if **ras_if = &adev->gfx.ras_if; 3544 struct ras_ih_if ih_info = { 3545 .cb = gfx_v9_0_process_ras_data_cb, 3546 }; 3547 struct ras_fs_if fs_info = { 3548 .sysfs_name = "gfx_err_count", 3549 .debugfs_name = "gfx_err_inject", 3550 }; 3551 struct ras_common_if ras_block = { 3552 .block = AMDGPU_RAS_BLOCK__GFX, 3553 .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, 3554 .sub_block_index = 0, 3555 .name = "gfx", 3556 }; 3557 int r; 3558 3559 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) { 3560 amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0); 3561 return 0; 3562 } 3563 3564 if (*ras_if) 3565 goto resume; 3566 3567 *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL); 3568 if (!*ras_if) 3569 return -ENOMEM; 3570 3571 **ras_if = ras_block; 3572 3573 r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); 3574 if (r) 3575 goto feature; 3576 3577 ih_info.head = **ras_if; 3578 fs_info.head = **ras_if; 3579 3580 r = amdgpu_ras_interrupt_add_handler(adev, &ih_info); 3581 if (r) 3582 goto interrupt; 3583 3584 r = amdgpu_ras_debugfs_create(adev, &fs_info); 3585 if (r) 3586 goto debugfs; 3587 3588 r = amdgpu_ras_sysfs_create(adev, &fs_info); 3589 if (r) 3590 goto sysfs; 3591 resume: 3592 r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); 3593 if (r) 3594 goto irq; 3595 3596 return 0; 3597 irq: 3598 amdgpu_ras_sysfs_remove(adev, *ras_if); 3599 sysfs: 3600 amdgpu_ras_debugfs_remove(adev, *ras_if); 3601 debugfs: 3602 amdgpu_ras_interrupt_remove_handler(adev, &ih_info); 3603 interrupt: 3604 amdgpu_ras_feature_enable(adev, *ras_if, 0); 3605 feature: 3606 kfree(*ras_if); 3607 *ras_if = NULL; 3608 return -EINVAL; 3609 } 3610 3611 static int gfx_v9_0_late_init(void *handle) 3612 { 3613 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3614 int r; 3615 3616 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); 3617 if (r) 3618 return r; 3619 3620 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); 3621 if (r) 3622 return r; 3623 3624 r = gfx_v9_0_ecc_late_init(handle); 3625 if (r) 3626 return r; 3627 3628 return 0; 3629 } 3630 3631 static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev) 3632 { 3633 uint32_t rlc_setting; 3634 3635 /* if RLC is not enabled, do nothing */ 3636 rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL); 3637 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK)) 3638 return false; 3639 3640 return true; 3641 } 3642 3643 static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev) 3644 { 3645 uint32_t data; 3646 unsigned i; 3647 3648 data = RLC_SAFE_MODE__CMD_MASK; 3649 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); 3650 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); 3651 3652 /* wait for RLC_SAFE_MODE */ 3653 for (i = 0; i < adev->usec_timeout; i++) { 3654 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) 3655 break; 3656 udelay(1); 3657 } 3658 } 3659 3660 static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev) 3661 { 3662 uint32_t data; 3663 3664 data = RLC_SAFE_MODE__CMD_MASK; 3665 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); 3666 } 3667 3668 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, 3669 bool enable) 3670 { 3671 amdgpu_gfx_rlc_enter_safe_mode(adev); 3672 3673 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) { 3674 gfx_v9_0_enable_gfx_cg_power_gating(adev, true); 3675 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE) 3676 gfx_v9_0_enable_gfx_pipeline_powergating(adev, true); 3677 } else { 3678 gfx_v9_0_enable_gfx_cg_power_gating(adev, false); 3679 gfx_v9_0_enable_gfx_pipeline_powergating(adev, false); 3680 } 3681 3682 amdgpu_gfx_rlc_exit_safe_mode(adev); 3683 } 3684 3685 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev, 3686 bool enable) 3687 { 3688 /* TODO: double check if we need to perform under safe mode */ 3689 /* gfx_v9_0_enter_rlc_safe_mode(adev); */ 3690 3691 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable) 3692 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true); 3693 else 3694 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false); 3695 3696 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable) 3697 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true); 3698 else 3699 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false); 3700 3701 /* gfx_v9_0_exit_rlc_safe_mode(adev); */ 3702 } 3703 3704 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, 3705 bool enable) 3706 { 3707 uint32_t data, def; 3708 3709 amdgpu_gfx_rlc_enter_safe_mode(adev); 3710 3711 /* It is disabled by HW by default */ 3712 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { 3713 /* 1 - RLC_CGTT_MGCG_OVERRIDE */ 3714 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 3715 3716 if (adev->asic_type != CHIP_VEGA12) 3717 data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK; 3718 3719 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 3720 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | 3721 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); 3722 3723 /* only for Vega10 & Raven1 */ 3724 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK; 3725 3726 if (def != data) 3727 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); 3728 3729 /* MGLS is a global flag to control all MGLS in GFX */ 3730 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { 3731 /* 2 - RLC memory Light sleep */ 3732 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) { 3733 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); 3734 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; 3735 if (def != data) 3736 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data); 3737 } 3738 /* 3 - CP memory Light sleep */ 3739 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { 3740 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); 3741 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; 3742 if (def != data) 3743 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); 3744 } 3745 } 3746 } else { 3747 /* 1 - MGCG_OVERRIDE */ 3748 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 3749 3750 if (adev->asic_type != CHIP_VEGA12) 3751 data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK; 3752 3753 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 3754 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 3755 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | 3756 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); 3757 3758 if (def != data) 3759 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); 3760 3761 /* 2 - disable MGLS in RLC */ 3762 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); 3763 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) { 3764 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; 3765 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data); 3766 } 3767 3768 /* 3 - disable MGLS in CP */ 3769 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); 3770 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) { 3771 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; 3772 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); 3773 } 3774 } 3775 3776 amdgpu_gfx_rlc_exit_safe_mode(adev); 3777 } 3778 3779 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev, 3780 bool enable) 3781 { 3782 uint32_t data, def; 3783 3784 amdgpu_gfx_rlc_enter_safe_mode(adev); 3785 3786 /* Enable 3D CGCG/CGLS */ 3787 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) { 3788 /* write cmd to clear cgcg/cgls ov */ 3789 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 3790 /* unset CGCG override */ 3791 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK; 3792 /* update CGCG and CGLS override bits */ 3793 if (def != data) 3794 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); 3795 3796 /* enable 3Dcgcg FSM(0x0000363f) */ 3797 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); 3798 3799 data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 3800 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 3801 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 3802 data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 3803 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 3804 if (def != data) 3805 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data); 3806 3807 /* set IDLE_POLL_COUNT(0x00900100) */ 3808 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL); 3809 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 3810 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 3811 if (def != data) 3812 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data); 3813 } else { 3814 /* Disable CGCG/CGLS */ 3815 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); 3816 /* disable cgcg, cgls should be disabled */ 3817 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK | 3818 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK); 3819 /* disable cgcg and cgls in FSM */ 3820 if (def != data) 3821 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data); 3822 } 3823 3824 amdgpu_gfx_rlc_exit_safe_mode(adev); 3825 } 3826 3827 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, 3828 bool enable) 3829 { 3830 uint32_t def, data; 3831 3832 amdgpu_gfx_rlc_enter_safe_mode(adev); 3833 3834 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { 3835 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 3836 /* unset CGCG override */ 3837 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; 3838 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 3839 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 3840 else 3841 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 3842 /* update CGCG and CGLS override bits */ 3843 if (def != data) 3844 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); 3845 3846 /* enable cgcg FSM(0x0000363F) */ 3847 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); 3848 3849 data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 3850 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 3851 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 3852 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 3853 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 3854 if (def != data) 3855 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data); 3856 3857 /* set IDLE_POLL_COUNT(0x00900100) */ 3858 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL); 3859 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 3860 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 3861 if (def != data) 3862 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data); 3863 } else { 3864 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); 3865 /* reset CGCG/CGLS bits */ 3866 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); 3867 /* disable cgcg and cgls in FSM */ 3868 if (def != data) 3869 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data); 3870 } 3871 3872 amdgpu_gfx_rlc_exit_safe_mode(adev); 3873 } 3874 3875 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, 3876 bool enable) 3877 { 3878 if (enable) { 3879 /* CGCG/CGLS should be enabled after MGCG/MGLS 3880 * === MGCG + MGLS === 3881 */ 3882 gfx_v9_0_update_medium_grain_clock_gating(adev, enable); 3883 /* === CGCG /CGLS for GFX 3D Only === */ 3884 gfx_v9_0_update_3d_clock_gating(adev, enable); 3885 /* === CGCG + CGLS === */ 3886 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable); 3887 } else { 3888 /* CGCG/CGLS should be disabled before MGCG/MGLS 3889 * === CGCG + CGLS === 3890 */ 3891 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable); 3892 /* === CGCG /CGLS for GFX 3D Only === */ 3893 gfx_v9_0_update_3d_clock_gating(adev, enable); 3894 /* === MGCG + MGLS === */ 3895 gfx_v9_0_update_medium_grain_clock_gating(adev, enable); 3896 } 3897 return 0; 3898 } 3899 3900 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { 3901 .is_rlc_enabled = gfx_v9_0_is_rlc_enabled, 3902 .set_safe_mode = gfx_v9_0_set_safe_mode, 3903 .unset_safe_mode = gfx_v9_0_unset_safe_mode, 3904 .init = gfx_v9_0_rlc_init, 3905 .get_csb_size = gfx_v9_0_get_csb_size, 3906 .get_csb_buffer = gfx_v9_0_get_csb_buffer, 3907 .get_cp_table_num = gfx_v9_0_cp_jump_table_num, 3908 .resume = gfx_v9_0_rlc_resume, 3909 .stop = gfx_v9_0_rlc_stop, 3910 .reset = gfx_v9_0_rlc_reset, 3911 .start = gfx_v9_0_rlc_start 3912 }; 3913 3914 static int gfx_v9_0_set_powergating_state(void *handle, 3915 enum amd_powergating_state state) 3916 { 3917 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3918 bool enable = (state == AMD_PG_STATE_GATE) ? true : false; 3919 3920 switch (adev->asic_type) { 3921 case CHIP_RAVEN: 3922 if (!enable) { 3923 amdgpu_gfx_off_ctrl(adev, false); 3924 cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); 3925 } 3926 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) { 3927 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true); 3928 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true); 3929 } else { 3930 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false); 3931 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false); 3932 } 3933 3934 if (adev->pg_flags & AMD_PG_SUPPORT_CP) 3935 gfx_v9_0_enable_cp_power_gating(adev, true); 3936 else 3937 gfx_v9_0_enable_cp_power_gating(adev, false); 3938 3939 /* update gfx cgpg state */ 3940 gfx_v9_0_update_gfx_cg_power_gating(adev, enable); 3941 3942 /* update mgcg state */ 3943 gfx_v9_0_update_gfx_mg_power_gating(adev, enable); 3944 3945 if (enable) 3946 amdgpu_gfx_off_ctrl(adev, true); 3947 break; 3948 case CHIP_VEGA12: 3949 if (!enable) { 3950 amdgpu_gfx_off_ctrl(adev, false); 3951 cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); 3952 } else { 3953 amdgpu_gfx_off_ctrl(adev, true); 3954 } 3955 break; 3956 default: 3957 break; 3958 } 3959 3960 return 0; 3961 } 3962 3963 static int gfx_v9_0_set_clockgating_state(void *handle, 3964 enum amd_clockgating_state state) 3965 { 3966 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3967 3968 if (amdgpu_sriov_vf(adev)) 3969 return 0; 3970 3971 switch (adev->asic_type) { 3972 case CHIP_VEGA10: 3973 case CHIP_VEGA12: 3974 case CHIP_VEGA20: 3975 case CHIP_RAVEN: 3976 gfx_v9_0_update_gfx_clock_gating(adev, 3977 state == AMD_CG_STATE_GATE ? true : false); 3978 break; 3979 default: 3980 break; 3981 } 3982 return 0; 3983 } 3984 3985 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags) 3986 { 3987 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3988 int data; 3989 3990 if (amdgpu_sriov_vf(adev)) 3991 *flags = 0; 3992 3993 /* AMD_CG_SUPPORT_GFX_MGCG */ 3994 data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 3995 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) 3996 *flags |= AMD_CG_SUPPORT_GFX_MGCG; 3997 3998 /* AMD_CG_SUPPORT_GFX_CGCG */ 3999 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); 4000 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) 4001 *flags |= AMD_CG_SUPPORT_GFX_CGCG; 4002 4003 /* AMD_CG_SUPPORT_GFX_CGLS */ 4004 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) 4005 *flags |= AMD_CG_SUPPORT_GFX_CGLS; 4006 4007 /* AMD_CG_SUPPORT_GFX_RLC_LS */ 4008 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); 4009 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) 4010 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS; 4011 4012 /* AMD_CG_SUPPORT_GFX_CP_LS */ 4013 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); 4014 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) 4015 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; 4016 4017 /* AMD_CG_SUPPORT_GFX_3D_CGCG */ 4018 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); 4019 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) 4020 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; 4021 4022 /* AMD_CG_SUPPORT_GFX_3D_CGLS */ 4023 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK) 4024 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS; 4025 } 4026 4027 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) 4028 { 4029 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr*/ 4030 } 4031 4032 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) 4033 { 4034 struct amdgpu_device *adev = ring->adev; 4035 u64 wptr; 4036 4037 /* XXX check if swapping is necessary on BE */ 4038 if (ring->use_doorbell) { 4039 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]); 4040 } else { 4041 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR); 4042 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32; 4043 } 4044 4045 return wptr; 4046 } 4047 4048 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) 4049 { 4050 struct amdgpu_device *adev = ring->adev; 4051 4052 if (ring->use_doorbell) { 4053 /* XXX check if swapping is necessary on BE */ 4054 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr); 4055 WDOORBELL64(ring->doorbell_index, ring->wptr); 4056 } else { 4057 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); 4058 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); 4059 } 4060 } 4061 4062 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) 4063 { 4064 struct amdgpu_device *adev = ring->adev; 4065 u32 ref_and_mask, reg_mem_engine; 4066 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; 4067 4068 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 4069 switch (ring->me) { 4070 case 1: 4071 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; 4072 break; 4073 case 2: 4074 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; 4075 break; 4076 default: 4077 return; 4078 } 4079 reg_mem_engine = 0; 4080 } else { 4081 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0; 4082 reg_mem_engine = 1; /* pfp */ 4083 } 4084 4085 gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, 4086 adev->nbio_funcs->get_hdp_flush_req_offset(adev), 4087 adev->nbio_funcs->get_hdp_flush_done_offset(adev), 4088 ref_and_mask, ref_and_mask, 0x20); 4089 } 4090 4091 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 4092 struct amdgpu_job *job, 4093 struct amdgpu_ib *ib, 4094 uint32_t flags) 4095 { 4096 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 4097 u32 header, control = 0; 4098 4099 if (ib->flags & AMDGPU_IB_FLAG_CE) 4100 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2); 4101 else 4102 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); 4103 4104 control |= ib->length_dw | (vmid << 24); 4105 4106 if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { 4107 control |= INDIRECT_BUFFER_PRE_ENB(1); 4108 4109 if (!(ib->flags & AMDGPU_IB_FLAG_CE)) 4110 gfx_v9_0_ring_emit_de_meta(ring); 4111 } 4112 4113 amdgpu_ring_write(ring, header); 4114 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 4115 amdgpu_ring_write(ring, 4116 #ifdef __BIG_ENDIAN 4117 (2 << 0) | 4118 #endif 4119 lower_32_bits(ib->gpu_addr)); 4120 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 4121 amdgpu_ring_write(ring, control); 4122 } 4123 4124 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring, 4125 struct amdgpu_job *job, 4126 struct amdgpu_ib *ib, 4127 uint32_t flags) 4128 { 4129 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 4130 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); 4131 4132 /* Currently, there is a high possibility to get wave ID mismatch 4133 * between ME and GDS, leading to a hw deadlock, because ME generates 4134 * different wave IDs than the GDS expects. This situation happens 4135 * randomly when at least 5 compute pipes use GDS ordered append. 4136 * The wave IDs generated by ME are also wrong after suspend/resume. 4137 * Those are probably bugs somewhere else in the kernel driver. 4138 * 4139 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and 4140 * GDS to 0 for this ring (me/pipe). 4141 */ 4142 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { 4143 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 4144 amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID); 4145 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id); 4146 } 4147 4148 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 4149 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 4150 amdgpu_ring_write(ring, 4151 #ifdef __BIG_ENDIAN 4152 (2 << 0) | 4153 #endif 4154 lower_32_bits(ib->gpu_addr)); 4155 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 4156 amdgpu_ring_write(ring, control); 4157 } 4158 4159 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, 4160 u64 seq, unsigned flags) 4161 { 4162 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 4163 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; 4164 bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY; 4165 4166 /* RELEASE_MEM - flush caches, send int */ 4167 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); 4168 amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN | 4169 EOP_TC_NC_ACTION_EN) : 4170 (EOP_TCL1_ACTION_EN | 4171 EOP_TC_ACTION_EN | 4172 EOP_TC_WB_ACTION_EN | 4173 EOP_TC_MD_ACTION_EN)) | 4174 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 4175 EVENT_INDEX(5))); 4176 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); 4177 4178 /* 4179 * the address should be Qword aligned if 64bit write, Dword 4180 * aligned if only send 32bit data low (discard data high) 4181 */ 4182 if (write64bit) 4183 BUG_ON(addr & 0x7); 4184 else 4185 BUG_ON(addr & 0x3); 4186 amdgpu_ring_write(ring, lower_32_bits(addr)); 4187 amdgpu_ring_write(ring, upper_32_bits(addr)); 4188 amdgpu_ring_write(ring, lower_32_bits(seq)); 4189 amdgpu_ring_write(ring, upper_32_bits(seq)); 4190 amdgpu_ring_write(ring, 0); 4191 } 4192 4193 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) 4194 { 4195 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 4196 uint32_t seq = ring->fence_drv.sync_seq; 4197 uint64_t addr = ring->fence_drv.gpu_addr; 4198 4199 gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0, 4200 lower_32_bits(addr), upper_32_bits(addr), 4201 seq, 0xffffffff, 4); 4202 } 4203 4204 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 4205 unsigned vmid, uint64_t pd_addr) 4206 { 4207 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 4208 4209 /* compute doesn't have PFP */ 4210 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) { 4211 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 4212 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 4213 amdgpu_ring_write(ring, 0x0); 4214 } 4215 } 4216 4217 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring) 4218 { 4219 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */ 4220 } 4221 4222 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring) 4223 { 4224 u64 wptr; 4225 4226 /* XXX check if swapping is necessary on BE */ 4227 if (ring->use_doorbell) 4228 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]); 4229 else 4230 BUG(); 4231 return wptr; 4232 } 4233 4234 static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring, 4235 bool acquire) 4236 { 4237 struct amdgpu_device *adev = ring->adev; 4238 int pipe_num, tmp, reg; 4239 int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1; 4240 4241 pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe; 4242 4243 /* first me only has 2 entries, GFX and HP3D */ 4244 if (ring->me > 0) 4245 pipe_num -= 2; 4246 4247 reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num; 4248 tmp = RREG32(reg); 4249 tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent); 4250 WREG32(reg, tmp); 4251 } 4252 4253 static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev, 4254 struct amdgpu_ring *ring, 4255 bool acquire) 4256 { 4257 int i, pipe; 4258 bool reserve; 4259 struct amdgpu_ring *iring; 4260 4261 mutex_lock(&adev->gfx.pipe_reserve_mutex); 4262 pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0); 4263 if (acquire) 4264 set_bit(pipe, adev->gfx.pipe_reserve_bitmap); 4265 else 4266 clear_bit(pipe, adev->gfx.pipe_reserve_bitmap); 4267 4268 if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) { 4269 /* Clear all reservations - everyone reacquires all resources */ 4270 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) 4271 gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i], 4272 true); 4273 4274 for (i = 0; i < adev->gfx.num_compute_rings; ++i) 4275 gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i], 4276 true); 4277 } else { 4278 /* Lower all pipes without a current reservation */ 4279 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { 4280 iring = &adev->gfx.gfx_ring[i]; 4281 pipe = amdgpu_gfx_queue_to_bit(adev, 4282 iring->me, 4283 iring->pipe, 4284 0); 4285 reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); 4286 gfx_v9_0_ring_set_pipe_percent(iring, reserve); 4287 } 4288 4289 for (i = 0; i < adev->gfx.num_compute_rings; ++i) { 4290 iring = &adev->gfx.compute_ring[i]; 4291 pipe = amdgpu_gfx_queue_to_bit(adev, 4292 iring->me, 4293 iring->pipe, 4294 0); 4295 reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); 4296 gfx_v9_0_ring_set_pipe_percent(iring, reserve); 4297 } 4298 } 4299 4300 mutex_unlock(&adev->gfx.pipe_reserve_mutex); 4301 } 4302 4303 static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev, 4304 struct amdgpu_ring *ring, 4305 bool acquire) 4306 { 4307 uint32_t pipe_priority = acquire ? 0x2 : 0x0; 4308 uint32_t queue_priority = acquire ? 0xf : 0x0; 4309 4310 mutex_lock(&adev->srbm_mutex); 4311 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 4312 4313 WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority); 4314 WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority); 4315 4316 soc15_grbm_select(adev, 0, 0, 0, 0); 4317 mutex_unlock(&adev->srbm_mutex); 4318 } 4319 4320 static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring, 4321 enum drm_sched_priority priority) 4322 { 4323 struct amdgpu_device *adev = ring->adev; 4324 bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW; 4325 4326 if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) 4327 return; 4328 4329 gfx_v9_0_hqd_set_priority(adev, ring, acquire); 4330 gfx_v9_0_pipe_reserve_resources(adev, ring, acquire); 4331 } 4332 4333 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring) 4334 { 4335 struct amdgpu_device *adev = ring->adev; 4336 4337 /* XXX check if swapping is necessary on BE */ 4338 if (ring->use_doorbell) { 4339 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr); 4340 WDOORBELL64(ring->doorbell_index, ring->wptr); 4341 } else{ 4342 BUG(); /* only DOORBELL method supported on gfx9 now */ 4343 } 4344 } 4345 4346 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, 4347 u64 seq, unsigned int flags) 4348 { 4349 struct amdgpu_device *adev = ring->adev; 4350 4351 /* we only allocate 32bit for each seq wb address */ 4352 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); 4353 4354 /* write fence seq to the "addr" */ 4355 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4356 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 4357 WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); 4358 amdgpu_ring_write(ring, lower_32_bits(addr)); 4359 amdgpu_ring_write(ring, upper_32_bits(addr)); 4360 amdgpu_ring_write(ring, lower_32_bits(seq)); 4361 4362 if (flags & AMDGPU_FENCE_FLAG_INT) { 4363 /* set register to trigger INT */ 4364 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4365 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 4366 WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); 4367 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS)); 4368 amdgpu_ring_write(ring, 0); 4369 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ 4370 } 4371 } 4372 4373 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring) 4374 { 4375 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 4376 amdgpu_ring_write(ring, 0); 4377 } 4378 4379 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring) 4380 { 4381 struct v9_ce_ib_state ce_payload = {0}; 4382 uint64_t csa_addr; 4383 int cnt; 4384 4385 cnt = (sizeof(ce_payload) >> 2) + 4 - 2; 4386 csa_addr = amdgpu_csa_vaddr(ring->adev); 4387 4388 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); 4389 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | 4390 WRITE_DATA_DST_SEL(8) | 4391 WR_CONFIRM) | 4392 WRITE_DATA_CACHE_POLICY(0)); 4393 amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload))); 4394 amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload))); 4395 amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2); 4396 } 4397 4398 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring) 4399 { 4400 struct v9_de_ib_state de_payload = {0}; 4401 uint64_t csa_addr, gds_addr; 4402 int cnt; 4403 4404 csa_addr = amdgpu_csa_vaddr(ring->adev); 4405 gds_addr = csa_addr + 4096; 4406 de_payload.gds_backup_addrlo = lower_32_bits(gds_addr); 4407 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); 4408 4409 cnt = (sizeof(de_payload) >> 2) + 4 - 2; 4410 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); 4411 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | 4412 WRITE_DATA_DST_SEL(8) | 4413 WR_CONFIRM) | 4414 WRITE_DATA_CACHE_POLICY(0)); 4415 amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload))); 4416 amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload))); 4417 amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2); 4418 } 4419 4420 static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start) 4421 { 4422 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); 4423 amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */ 4424 } 4425 4426 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) 4427 { 4428 uint32_t dw2 = 0; 4429 4430 if (amdgpu_sriov_vf(ring->adev)) 4431 gfx_v9_0_ring_emit_ce_meta(ring); 4432 4433 gfx_v9_0_ring_emit_tmz(ring, true); 4434 4435 dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */ 4436 if (flags & AMDGPU_HAVE_CTX_SWITCH) { 4437 /* set load_global_config & load_global_uconfig */ 4438 dw2 |= 0x8001; 4439 /* set load_cs_sh_regs */ 4440 dw2 |= 0x01000000; 4441 /* set load_per_context_state & load_gfx_sh_regs for GFX */ 4442 dw2 |= 0x10002; 4443 4444 /* set load_ce_ram if preamble presented */ 4445 if (AMDGPU_PREAMBLE_IB_PRESENT & flags) 4446 dw2 |= 0x10000000; 4447 } else { 4448 /* still load_ce_ram if this is the first time preamble presented 4449 * although there is no context switch happens. 4450 */ 4451 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags) 4452 dw2 |= 0x10000000; 4453 } 4454 4455 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 4456 amdgpu_ring_write(ring, dw2); 4457 amdgpu_ring_write(ring, 0); 4458 } 4459 4460 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring) 4461 { 4462 unsigned ret; 4463 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3)); 4464 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr)); 4465 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr)); 4466 amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */ 4467 ret = ring->wptr & ring->buf_mask; 4468 amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */ 4469 return ret; 4470 } 4471 4472 static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset) 4473 { 4474 unsigned cur; 4475 BUG_ON(offset > ring->buf_mask); 4476 BUG_ON(ring->ring[offset] != 0x55aa55aa); 4477 4478 cur = (ring->wptr & ring->buf_mask) - 1; 4479 if (likely(cur > offset)) 4480 ring->ring[offset] = cur - offset; 4481 else 4482 ring->ring[offset] = (ring->ring_size>>2) - offset + cur; 4483 } 4484 4485 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) 4486 { 4487 struct amdgpu_device *adev = ring->adev; 4488 4489 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 4490 amdgpu_ring_write(ring, 0 | /* src: register*/ 4491 (5 << 8) | /* dst: memory */ 4492 (1 << 20)); /* write confirm */ 4493 amdgpu_ring_write(ring, reg); 4494 amdgpu_ring_write(ring, 0); 4495 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 4496 adev->virt.reg_val_offs * 4)); 4497 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 4498 adev->virt.reg_val_offs * 4)); 4499 } 4500 4501 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 4502 uint32_t val) 4503 { 4504 uint32_t cmd = 0; 4505 4506 switch (ring->funcs->type) { 4507 case AMDGPU_RING_TYPE_GFX: 4508 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; 4509 break; 4510 case AMDGPU_RING_TYPE_KIQ: 4511 cmd = (1 << 16); /* no inc addr */ 4512 break; 4513 default: 4514 cmd = WR_CONFIRM; 4515 break; 4516 } 4517 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4518 amdgpu_ring_write(ring, cmd); 4519 amdgpu_ring_write(ring, reg); 4520 amdgpu_ring_write(ring, 0); 4521 amdgpu_ring_write(ring, val); 4522 } 4523 4524 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 4525 uint32_t val, uint32_t mask) 4526 { 4527 gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); 4528 } 4529 4530 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, 4531 uint32_t reg0, uint32_t reg1, 4532 uint32_t ref, uint32_t mask) 4533 { 4534 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 4535 struct amdgpu_device *adev = ring->adev; 4536 bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ? 4537 adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait; 4538 4539 if (fw_version_ok) 4540 gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, 4541 ref, mask, 0x20); 4542 else 4543 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1, 4544 ref, mask); 4545 } 4546 4547 static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid) 4548 { 4549 struct amdgpu_device *adev = ring->adev; 4550 uint32_t value = 0; 4551 4552 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); 4553 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); 4554 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); 4555 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); 4556 WREG32(mmSQ_CMD, value); 4557 } 4558 4559 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, 4560 enum amdgpu_interrupt_state state) 4561 { 4562 switch (state) { 4563 case AMDGPU_IRQ_STATE_DISABLE: 4564 case AMDGPU_IRQ_STATE_ENABLE: 4565 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, 4566 TIME_STAMP_INT_ENABLE, 4567 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 4568 break; 4569 default: 4570 break; 4571 } 4572 } 4573 4574 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, 4575 int me, int pipe, 4576 enum amdgpu_interrupt_state state) 4577 { 4578 u32 mec_int_cntl, mec_int_cntl_reg; 4579 4580 /* 4581 * amdgpu controls only the first MEC. That's why this function only 4582 * handles the setting of interrupts for this specific MEC. All other 4583 * pipes' interrupts are set by amdkfd. 4584 */ 4585 4586 if (me == 1) { 4587 switch (pipe) { 4588 case 0: 4589 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL); 4590 break; 4591 case 1: 4592 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL); 4593 break; 4594 case 2: 4595 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL); 4596 break; 4597 case 3: 4598 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL); 4599 break; 4600 default: 4601 DRM_DEBUG("invalid pipe %d\n", pipe); 4602 return; 4603 } 4604 } else { 4605 DRM_DEBUG("invalid me %d\n", me); 4606 return; 4607 } 4608 4609 switch (state) { 4610 case AMDGPU_IRQ_STATE_DISABLE: 4611 mec_int_cntl = RREG32(mec_int_cntl_reg); 4612 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4613 TIME_STAMP_INT_ENABLE, 0); 4614 WREG32(mec_int_cntl_reg, mec_int_cntl); 4615 break; 4616 case AMDGPU_IRQ_STATE_ENABLE: 4617 mec_int_cntl = RREG32(mec_int_cntl_reg); 4618 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4619 TIME_STAMP_INT_ENABLE, 1); 4620 WREG32(mec_int_cntl_reg, mec_int_cntl); 4621 break; 4622 default: 4623 break; 4624 } 4625 } 4626 4627 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev, 4628 struct amdgpu_irq_src *source, 4629 unsigned type, 4630 enum amdgpu_interrupt_state state) 4631 { 4632 switch (state) { 4633 case AMDGPU_IRQ_STATE_DISABLE: 4634 case AMDGPU_IRQ_STATE_ENABLE: 4635 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, 4636 PRIV_REG_INT_ENABLE, 4637 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 4638 break; 4639 default: 4640 break; 4641 } 4642 4643 return 0; 4644 } 4645 4646 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev, 4647 struct amdgpu_irq_src *source, 4648 unsigned type, 4649 enum amdgpu_interrupt_state state) 4650 { 4651 switch (state) { 4652 case AMDGPU_IRQ_STATE_DISABLE: 4653 case AMDGPU_IRQ_STATE_ENABLE: 4654 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, 4655 PRIV_INSTR_INT_ENABLE, 4656 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 4657 default: 4658 break; 4659 } 4660 4661 return 0; 4662 } 4663 4664 #define ENABLE_ECC_ON_ME_PIPE(me, pipe) \ 4665 WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\ 4666 CP_ECC_ERROR_INT_ENABLE, 1) 4667 4668 #define DISABLE_ECC_ON_ME_PIPE(me, pipe) \ 4669 WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\ 4670 CP_ECC_ERROR_INT_ENABLE, 0) 4671 4672 static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev, 4673 struct amdgpu_irq_src *source, 4674 unsigned type, 4675 enum amdgpu_interrupt_state state) 4676 { 4677 switch (state) { 4678 case AMDGPU_IRQ_STATE_DISABLE: 4679 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, 4680 CP_ECC_ERROR_INT_ENABLE, 0); 4681 DISABLE_ECC_ON_ME_PIPE(1, 0); 4682 DISABLE_ECC_ON_ME_PIPE(1, 1); 4683 DISABLE_ECC_ON_ME_PIPE(1, 2); 4684 DISABLE_ECC_ON_ME_PIPE(1, 3); 4685 break; 4686 4687 case AMDGPU_IRQ_STATE_ENABLE: 4688 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, 4689 CP_ECC_ERROR_INT_ENABLE, 1); 4690 ENABLE_ECC_ON_ME_PIPE(1, 0); 4691 ENABLE_ECC_ON_ME_PIPE(1, 1); 4692 ENABLE_ECC_ON_ME_PIPE(1, 2); 4693 ENABLE_ECC_ON_ME_PIPE(1, 3); 4694 break; 4695 default: 4696 break; 4697 } 4698 4699 return 0; 4700 } 4701 4702 4703 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev, 4704 struct amdgpu_irq_src *src, 4705 unsigned type, 4706 enum amdgpu_interrupt_state state) 4707 { 4708 switch (type) { 4709 case AMDGPU_CP_IRQ_GFX_EOP: 4710 gfx_v9_0_set_gfx_eop_interrupt_state(adev, state); 4711 break; 4712 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: 4713 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state); 4714 break; 4715 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: 4716 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state); 4717 break; 4718 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: 4719 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state); 4720 break; 4721 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: 4722 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state); 4723 break; 4724 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP: 4725 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state); 4726 break; 4727 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP: 4728 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state); 4729 break; 4730 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP: 4731 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state); 4732 break; 4733 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP: 4734 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state); 4735 break; 4736 default: 4737 break; 4738 } 4739 return 0; 4740 } 4741 4742 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev, 4743 struct amdgpu_irq_src *source, 4744 struct amdgpu_iv_entry *entry) 4745 { 4746 int i; 4747 u8 me_id, pipe_id, queue_id; 4748 struct amdgpu_ring *ring; 4749 4750 DRM_DEBUG("IH: CP EOP\n"); 4751 me_id = (entry->ring_id & 0x0c) >> 2; 4752 pipe_id = (entry->ring_id & 0x03) >> 0; 4753 queue_id = (entry->ring_id & 0x70) >> 4; 4754 4755 switch (me_id) { 4756 case 0: 4757 amdgpu_fence_process(&adev->gfx.gfx_ring[0]); 4758 break; 4759 case 1: 4760 case 2: 4761 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4762 ring = &adev->gfx.compute_ring[i]; 4763 /* Per-queue interrupt is supported for MEC starting from VI. 4764 * The interrupt can only be enabled/disabled per pipe instead of per queue. 4765 */ 4766 if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id)) 4767 amdgpu_fence_process(ring); 4768 } 4769 break; 4770 } 4771 return 0; 4772 } 4773 4774 static void gfx_v9_0_fault(struct amdgpu_device *adev, 4775 struct amdgpu_iv_entry *entry) 4776 { 4777 u8 me_id, pipe_id, queue_id; 4778 struct amdgpu_ring *ring; 4779 int i; 4780 4781 me_id = (entry->ring_id & 0x0c) >> 2; 4782 pipe_id = (entry->ring_id & 0x03) >> 0; 4783 queue_id = (entry->ring_id & 0x70) >> 4; 4784 4785 switch (me_id) { 4786 case 0: 4787 drm_sched_fault(&adev->gfx.gfx_ring[0].sched); 4788 break; 4789 case 1: 4790 case 2: 4791 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4792 ring = &adev->gfx.compute_ring[i]; 4793 if (ring->me == me_id && ring->pipe == pipe_id && 4794 ring->queue == queue_id) 4795 drm_sched_fault(&ring->sched); 4796 } 4797 break; 4798 } 4799 } 4800 4801 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev, 4802 struct amdgpu_irq_src *source, 4803 struct amdgpu_iv_entry *entry) 4804 { 4805 DRM_ERROR("Illegal register access in command stream\n"); 4806 gfx_v9_0_fault(adev, entry); 4807 return 0; 4808 } 4809 4810 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev, 4811 struct amdgpu_irq_src *source, 4812 struct amdgpu_iv_entry *entry) 4813 { 4814 DRM_ERROR("Illegal instruction in command stream\n"); 4815 gfx_v9_0_fault(adev, entry); 4816 return 0; 4817 } 4818 4819 static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev, 4820 struct amdgpu_iv_entry *entry) 4821 { 4822 /* TODO ue will trigger an interrupt. */ 4823 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); 4824 amdgpu_ras_reset_gpu(adev, 0); 4825 return AMDGPU_RAS_UE; 4826 } 4827 4828 static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev, 4829 struct amdgpu_irq_src *source, 4830 struct amdgpu_iv_entry *entry) 4831 { 4832 struct ras_common_if *ras_if = adev->gfx.ras_if; 4833 struct ras_dispatch_if ih_data = { 4834 .entry = entry, 4835 }; 4836 4837 if (!ras_if) 4838 return 0; 4839 4840 ih_data.head = *ras_if; 4841 4842 DRM_ERROR("CP ECC ERROR IRQ\n"); 4843 amdgpu_ras_interrupt_dispatch(adev, &ih_data); 4844 return 0; 4845 } 4846 4847 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = { 4848 .name = "gfx_v9_0", 4849 .early_init = gfx_v9_0_early_init, 4850 .late_init = gfx_v9_0_late_init, 4851 .sw_init = gfx_v9_0_sw_init, 4852 .sw_fini = gfx_v9_0_sw_fini, 4853 .hw_init = gfx_v9_0_hw_init, 4854 .hw_fini = gfx_v9_0_hw_fini, 4855 .suspend = gfx_v9_0_suspend, 4856 .resume = gfx_v9_0_resume, 4857 .is_idle = gfx_v9_0_is_idle, 4858 .wait_for_idle = gfx_v9_0_wait_for_idle, 4859 .soft_reset = gfx_v9_0_soft_reset, 4860 .set_clockgating_state = gfx_v9_0_set_clockgating_state, 4861 .set_powergating_state = gfx_v9_0_set_powergating_state, 4862 .get_clockgating_state = gfx_v9_0_get_clockgating_state, 4863 }; 4864 4865 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = { 4866 .type = AMDGPU_RING_TYPE_GFX, 4867 .align_mask = 0xff, 4868 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4869 .support_64bit_ptrs = true, 4870 .vmhub = AMDGPU_GFXHUB, 4871 .get_rptr = gfx_v9_0_ring_get_rptr_gfx, 4872 .get_wptr = gfx_v9_0_ring_get_wptr_gfx, 4873 .set_wptr = gfx_v9_0_ring_set_wptr_gfx, 4874 .emit_frame_size = /* totally 242 maximum if 16 IBs */ 4875 5 + /* COND_EXEC */ 4876 7 + /* PIPELINE_SYNC */ 4877 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4878 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4879 2 + /* VM_FLUSH */ 4880 8 + /* FENCE for VM_FLUSH */ 4881 20 + /* GDS switch */ 4882 4 + /* double SWITCH_BUFFER, 4883 the first COND_EXEC jump to the place just 4884 prior to this double SWITCH_BUFFER */ 4885 5 + /* COND_EXEC */ 4886 7 + /* HDP_flush */ 4887 4 + /* VGT_flush */ 4888 14 + /* CE_META */ 4889 31 + /* DE_META */ 4890 3 + /* CNTX_CTRL */ 4891 5 + /* HDP_INVL */ 4892 8 + 8 + /* FENCE x2 */ 4893 2, /* SWITCH_BUFFER */ 4894 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */ 4895 .emit_ib = gfx_v9_0_ring_emit_ib_gfx, 4896 .emit_fence = gfx_v9_0_ring_emit_fence, 4897 .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync, 4898 .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush, 4899 .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch, 4900 .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush, 4901 .test_ring = gfx_v9_0_ring_test_ring, 4902 .test_ib = gfx_v9_0_ring_test_ib, 4903 .insert_nop = amdgpu_ring_insert_nop, 4904 .pad_ib = amdgpu_ring_generic_pad_ib, 4905 .emit_switch_buffer = gfx_v9_ring_emit_sb, 4906 .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl, 4907 .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec, 4908 .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec, 4909 .emit_tmz = gfx_v9_0_ring_emit_tmz, 4910 .emit_wreg = gfx_v9_0_ring_emit_wreg, 4911 .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, 4912 .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, 4913 .soft_recovery = gfx_v9_0_ring_soft_recovery, 4914 }; 4915 4916 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { 4917 .type = AMDGPU_RING_TYPE_COMPUTE, 4918 .align_mask = 0xff, 4919 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4920 .support_64bit_ptrs = true, 4921 .vmhub = AMDGPU_GFXHUB, 4922 .get_rptr = gfx_v9_0_ring_get_rptr_compute, 4923 .get_wptr = gfx_v9_0_ring_get_wptr_compute, 4924 .set_wptr = gfx_v9_0_ring_set_wptr_compute, 4925 .emit_frame_size = 4926 20 + /* gfx_v9_0_ring_emit_gds_switch */ 4927 7 + /* gfx_v9_0_ring_emit_hdp_flush */ 4928 5 + /* hdp invalidate */ 4929 7 + /* gfx_v9_0_ring_emit_pipeline_sync */ 4930 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4931 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4932 2 + /* gfx_v9_0_ring_emit_vm_flush */ 4933 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */ 4934 .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */ 4935 .emit_ib = gfx_v9_0_ring_emit_ib_compute, 4936 .emit_fence = gfx_v9_0_ring_emit_fence, 4937 .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync, 4938 .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush, 4939 .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch, 4940 .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush, 4941 .test_ring = gfx_v9_0_ring_test_ring, 4942 .test_ib = gfx_v9_0_ring_test_ib, 4943 .insert_nop = amdgpu_ring_insert_nop, 4944 .pad_ib = amdgpu_ring_generic_pad_ib, 4945 .set_priority = gfx_v9_0_ring_set_priority_compute, 4946 .emit_wreg = gfx_v9_0_ring_emit_wreg, 4947 .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, 4948 .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, 4949 }; 4950 4951 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { 4952 .type = AMDGPU_RING_TYPE_KIQ, 4953 .align_mask = 0xff, 4954 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4955 .support_64bit_ptrs = true, 4956 .vmhub = AMDGPU_GFXHUB, 4957 .get_rptr = gfx_v9_0_ring_get_rptr_compute, 4958 .get_wptr = gfx_v9_0_ring_get_wptr_compute, 4959 .set_wptr = gfx_v9_0_ring_set_wptr_compute, 4960 .emit_frame_size = 4961 20 + /* gfx_v9_0_ring_emit_gds_switch */ 4962 7 + /* gfx_v9_0_ring_emit_hdp_flush */ 4963 5 + /* hdp invalidate */ 4964 7 + /* gfx_v9_0_ring_emit_pipeline_sync */ 4965 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4966 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4967 2 + /* gfx_v9_0_ring_emit_vm_flush */ 4968 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */ 4969 .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */ 4970 .emit_fence = gfx_v9_0_ring_emit_fence_kiq, 4971 .test_ring = gfx_v9_0_ring_test_ring, 4972 .insert_nop = amdgpu_ring_insert_nop, 4973 .pad_ib = amdgpu_ring_generic_pad_ib, 4974 .emit_rreg = gfx_v9_0_ring_emit_rreg, 4975 .emit_wreg = gfx_v9_0_ring_emit_wreg, 4976 .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, 4977 .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, 4978 }; 4979 4980 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev) 4981 { 4982 int i; 4983 4984 adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq; 4985 4986 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 4987 adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx; 4988 4989 for (i = 0; i < adev->gfx.num_compute_rings; i++) 4990 adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute; 4991 } 4992 4993 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = { 4994 .set = gfx_v9_0_set_eop_interrupt_state, 4995 .process = gfx_v9_0_eop_irq, 4996 }; 4997 4998 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = { 4999 .set = gfx_v9_0_set_priv_reg_fault_state, 5000 .process = gfx_v9_0_priv_reg_irq, 5001 }; 5002 5003 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = { 5004 .set = gfx_v9_0_set_priv_inst_fault_state, 5005 .process = gfx_v9_0_priv_inst_irq, 5006 }; 5007 5008 static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = { 5009 .set = gfx_v9_0_set_cp_ecc_error_state, 5010 .process = gfx_v9_0_cp_ecc_error_irq, 5011 }; 5012 5013 5014 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev) 5015 { 5016 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; 5017 adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs; 5018 5019 adev->gfx.priv_reg_irq.num_types = 1; 5020 adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs; 5021 5022 adev->gfx.priv_inst_irq.num_types = 1; 5023 adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs; 5024 5025 adev->gfx.cp_ecc_error_irq.num_types = 2; /*C5 ECC error and C9 FUE error*/ 5026 adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs; 5027 } 5028 5029 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev) 5030 { 5031 switch (adev->asic_type) { 5032 case CHIP_VEGA10: 5033 case CHIP_VEGA12: 5034 case CHIP_VEGA20: 5035 case CHIP_RAVEN: 5036 adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs; 5037 break; 5038 default: 5039 break; 5040 } 5041 } 5042 5043 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev) 5044 { 5045 /* init asci gds info */ 5046 switch (adev->asic_type) { 5047 case CHIP_VEGA10: 5048 case CHIP_VEGA12: 5049 case CHIP_VEGA20: 5050 adev->gds.mem.total_size = 0x10000; 5051 break; 5052 case CHIP_RAVEN: 5053 adev->gds.mem.total_size = 0x1000; 5054 break; 5055 default: 5056 adev->gds.mem.total_size = 0x10000; 5057 break; 5058 } 5059 5060 switch (adev->asic_type) { 5061 case CHIP_VEGA10: 5062 case CHIP_VEGA20: 5063 adev->gds.gds_compute_max_wave_id = 0x7ff; 5064 break; 5065 case CHIP_VEGA12: 5066 adev->gds.gds_compute_max_wave_id = 0x27f; 5067 break; 5068 case CHIP_RAVEN: 5069 if (adev->rev_id >= 0x8) 5070 adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */ 5071 else 5072 adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */ 5073 break; 5074 default: 5075 /* this really depends on the chip */ 5076 adev->gds.gds_compute_max_wave_id = 0x7ff; 5077 break; 5078 } 5079 5080 adev->gds.gws.total_size = 64; 5081 adev->gds.oa.total_size = 16; 5082 5083 if (adev->gds.mem.total_size == 64 * 1024) { 5084 adev->gds.mem.gfx_partition_size = 4096; 5085 adev->gds.mem.cs_partition_size = 4096; 5086 5087 adev->gds.gws.gfx_partition_size = 4; 5088 adev->gds.gws.cs_partition_size = 4; 5089 5090 adev->gds.oa.gfx_partition_size = 4; 5091 adev->gds.oa.cs_partition_size = 1; 5092 } else { 5093 adev->gds.mem.gfx_partition_size = 1024; 5094 adev->gds.mem.cs_partition_size = 1024; 5095 5096 adev->gds.gws.gfx_partition_size = 16; 5097 adev->gds.gws.cs_partition_size = 16; 5098 5099 adev->gds.oa.gfx_partition_size = 4; 5100 adev->gds.oa.cs_partition_size = 4; 5101 } 5102 } 5103 5104 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, 5105 u32 bitmap) 5106 { 5107 u32 data; 5108 5109 if (!bitmap) 5110 return; 5111 5112 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; 5113 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; 5114 5115 WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data); 5116 } 5117 5118 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev) 5119 { 5120 u32 data, mask; 5121 5122 data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG); 5123 data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG); 5124 5125 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; 5126 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; 5127 5128 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh); 5129 5130 return (~data) & mask; 5131 } 5132 5133 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, 5134 struct amdgpu_cu_info *cu_info) 5135 { 5136 int i, j, k, counter, active_cu_number = 0; 5137 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; 5138 unsigned disable_masks[4 * 2]; 5139 5140 if (!adev || !cu_info) 5141 return -EINVAL; 5142 5143 amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2); 5144 5145 mutex_lock(&adev->grbm_idx_mutex); 5146 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 5147 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 5148 mask = 1; 5149 ao_bitmap = 0; 5150 counter = 0; 5151 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff); 5152 if (i < 4 && j < 2) 5153 gfx_v9_0_set_user_cu_inactive_bitmap( 5154 adev, disable_masks[i * 2 + j]); 5155 bitmap = gfx_v9_0_get_cu_active_bitmap(adev); 5156 cu_info->bitmap[i][j] = bitmap; 5157 5158 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { 5159 if (bitmap & mask) { 5160 if (counter < adev->gfx.config.max_cu_per_sh) 5161 ao_bitmap |= mask; 5162 counter ++; 5163 } 5164 mask <<= 1; 5165 } 5166 active_cu_number += counter; 5167 if (i < 2 && j < 2) 5168 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); 5169 cu_info->ao_cu_bitmap[i][j] = ao_bitmap; 5170 } 5171 } 5172 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 5173 mutex_unlock(&adev->grbm_idx_mutex); 5174 5175 cu_info->number = active_cu_number; 5176 cu_info->ao_cu_mask = ao_cu_mask; 5177 cu_info->simd_per_cu = NUM_SIMD_PER_CU; 5178 5179 return 0; 5180 } 5181 5182 const struct amdgpu_ip_block_version gfx_v9_0_ip_block = 5183 { 5184 .type = AMD_IP_BLOCK_TYPE_GFX, 5185 .major = 9, 5186 .minor = 0, 5187 .rev = 0, 5188 .funcs = &gfx_v9_0_ip_funcs, 5189 }; 5190