/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "vi.h"
#include "vi_structs.h"
#include "vid.h"
#include "amdgpu_ucode.h"
#include "amdgpu_atombios.h"
#include "atombios_i2c.h"
#include "clearstate_vi.h"

#include "gmc/gmc_8_2_d.h"
#include "gmc/gmc_8_2_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "smu/smu_7_1_3_d.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#define GFX8_NUM_GFX_RINGS	1
#define GFX8_MEC_HPD_SIZE	4096

#define TOPAZ_GB_ADDR_CONFIG_GOLDEN	0x22010001
#define CARRIZO_GB_ADDR_CONFIG_GOLDEN	0x22010001
#define POLARIS11_GB_ADDR_CONFIG_GOLDEN	0x22011002
#define TONGA_GB_ADDR_CONFIG_GOLDEN	0x22011003

#define ARRAY_MODE(x)		((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
#define PIPE_CONFIG(x)		((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
#define TILE_SPLIT(x)		((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT)
#define MICRO_TILE_MODE_NEW(x)	((x) << GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT)
#define SAMPLE_SPLIT(x)		((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT)
#define BANK_WIDTH(x)		((x) << GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT)
#define BANK_HEIGHT(x)		((x) << GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT)
#define MACRO_TILE_ASPECT(x)	((x) << GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT)
#define NUM_BANKS(x)		((x) << GB_MACROTILE_MODE0__NUM_BANKS__SHIFT)

#define RLC_CGTT_MGCG_OVERRIDE__CPF_MASK	0x00000001L
#define RLC_CGTT_MGCG_OVERRIDE__RLC_MASK	0x00000002L
#define RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK	0x00000004L
#define RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK	0x00000008L
#define RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK	0x00000010L
#define RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK	0x00000020L

/* BPM SERDES CMD */
#define SET_BPM_SERDES_CMD	1
#define CLE_BPM_SERDES_CMD	0

/* BPM Register Address */
enum {
	BPM_REG_CGLS_EN = 0,	/* Enable/Disable CGLS */
	BPM_REG_CGLS_ON,	/* ON/OFF CGLS: shall be controlled by RLC FW */
	BPM_REG_CGCG_OVERRIDE,	/* Set/Clear CGCG Override */
	BPM_REG_MGCG_OVERRIDE,	/* Set/Clear MGCG Override */
	BPM_REG_FGCG_OVERRIDE,	/* Set/Clear FGCG Override */
	BPM_REG_FGCG_MAX
};

#define RLC_FormatDirectRegListLength	14
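/*
 * Firmware images for each supported VI ASIC. request_firmware() pulls
 * these from /lib/firmware/amdgpu/ at init time, and the MODULE_FIRMWARE()
 * tags record the dependency in the module metadata so tooling can bundle
 * the right blobs (e.g. into an initramfs).
 */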
MODULE_FIRMWARE("amdgpu/carrizo_ce.bin");
MODULE_FIRMWARE("amdgpu/carrizo_pfp.bin");
MODULE_FIRMWARE("amdgpu/carrizo_me.bin");
MODULE_FIRMWARE("amdgpu/carrizo_mec.bin");
MODULE_FIRMWARE("amdgpu/carrizo_mec2.bin");
MODULE_FIRMWARE("amdgpu/carrizo_rlc.bin");

MODULE_FIRMWARE("amdgpu/stoney_ce.bin");
MODULE_FIRMWARE("amdgpu/stoney_pfp.bin");
MODULE_FIRMWARE("amdgpu/stoney_me.bin");
MODULE_FIRMWARE("amdgpu/stoney_mec.bin");
MODULE_FIRMWARE("amdgpu/stoney_rlc.bin");

MODULE_FIRMWARE("amdgpu/tonga_ce.bin");
MODULE_FIRMWARE("amdgpu/tonga_pfp.bin");
MODULE_FIRMWARE("amdgpu/tonga_me.bin");
MODULE_FIRMWARE("amdgpu/tonga_mec.bin");
MODULE_FIRMWARE("amdgpu/tonga_mec2.bin");
MODULE_FIRMWARE("amdgpu/tonga_rlc.bin");

MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
MODULE_FIRMWARE("amdgpu/topaz_me.bin");
MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");

MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
MODULE_FIRMWARE("amdgpu/fiji_pfp.bin");
MODULE_FIRMWARE("amdgpu/fiji_me.bin");
MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_me.bin");
MODULE_FIRMWARE("amdgpu/polaris10_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
MODULE_FIRMWARE("amdgpu/polaris12_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vegam_ce.bin");
MODULE_FIRMWARE("amdgpu/vegam_pfp.bin");
MODULE_FIRMWARE("amdgpu/vegam_me.bin");
MODULE_FIRMWARE("amdgpu/vegam_mec.bin");
MODULE_FIRMWARE("amdgpu/vegam_mec2.bin");
MODULE_FIRMWARE("amdgpu/vegam_rlc.bin");
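/*
 * GDS register offsets, one {base, size, GWS, OA} tuple per hardware
 * VMID (16 entries total).
 */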
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
};
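/*
 * The "golden settings" tables below are {offset, AND mask, OR value}
 * triples consumed by amdgpu_device_program_register_sequence(). For
 * pre-AI parts such as VI, each entry is applied approximately as:
 *
 *	tmp = RREG32(reg);
 *	tmp &= ~and_mask;
 *	tmp |= or_value;
 *	WREG32(reg, tmp);
 *
 * so an all-ones mask amounts to a straight register write.
 */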
static const u32 golden_settings_tonga_a11[] =
{
	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 tonga_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};

static const u32 golden_settings_vegam_a11[] =
{
	mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
	mmCB_HW_CONTROL_2, 0x0f000000, 0x0d000000,
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002e,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
	mmSQ_CONFIG, 0x07f80000, 0x01180000,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x32761054,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 vegam_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};
static const u32 golden_settings_polaris11_a11[] =
{
	mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
	mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
	mmSQ_CONFIG, 0x07f80000, 0x01180000,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 polaris11_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
	mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
	mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002a,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
	mmSQ_CONFIG, 0x07f80000, 0x07180000,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};

static const u32 polaris10_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};

static const u32 fiji_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};

static const u32 golden_settings_fiji_a10[] =
{
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};
static const u32 fiji_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};

static const u32 golden_settings_iceland_a11[] =
{
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDB_DEBUG3, 0xc0000000, 0xc0000000,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010,
};

static const u32 iceland_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};
static const u32 iceland_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0xc0000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0xc0000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0xc0000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0xff000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
};

static const u32 cz_golden_settings_a11[] =
{
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
};

static const u32 cz_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};

static const u32 stoney_golden_settings_a11[] =
{
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010,
};

static const u32 stoney_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000000,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
	mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
	mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
};


static const char * const sq_edc_source_names[] = {
	"SQ_EDC_INFO_SOURCE_INVALID: No EDC error has occurred",
	"SQ_EDC_INFO_SOURCE_INST: EDC source is Instruction Fetch",
	"SQ_EDC_INFO_SOURCE_SGPR: EDC source is SGPR or SQC data return",
	"SQ_EDC_INFO_SOURCE_VGPR: EDC source is VGPR",
	"SQ_EDC_INFO_SOURCE_LDS: EDC source is LDS",
	"SQ_EDC_INFO_SOURCE_GDS: EDC source is GDS",
	"SQ_EDC_INFO_SOURCE_TA: EDC source is TA",
};

static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev);
static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev);
static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring);
static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring);
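/*
 * Program the per-ASIC golden register sequences: the MGCG/CGCG init
 * table first (where one exists), then the "a11"/"a10" tuning values,
 * then the common settings. Polaris10 additionally tweaks CG_ACLK_CNTL
 * via SMC and, for a few specific board SKUs, issues a pair of AtomBIOS
 * I2C transactions (apparently a board-level fix; the exact purpose is
 * not documented here).
 */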
static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_iceland_a11,
							ARRAY_SIZE(golden_settings_iceland_a11));
		amdgpu_device_program_register_sequence(adev,
							iceland_golden_common_all,
							ARRAY_SIZE(iceland_golden_common_all));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		amdgpu_device_program_register_sequence(adev,
							fiji_golden_common_all,
							ARRAY_SIZE(fiji_golden_common_all));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		amdgpu_device_program_register_sequence(adev,
							tonga_golden_common_all,
							ARRAY_SIZE(tonga_golden_common_all));
		break;
	case CHIP_VEGAM:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_vegam_a11,
							ARRAY_SIZE(golden_settings_vegam_a11));
		amdgpu_device_program_register_sequence(adev,
							vegam_golden_common_all,
							ARRAY_SIZE(vegam_golden_common_all));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		amdgpu_device_program_register_sequence(adev,
							polaris11_golden_common_all,
							ARRAY_SIZE(polaris11_golden_common_all));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		amdgpu_device_program_register_sequence(adev,
							polaris10_golden_common_all,
							ARRAY_SIZE(polaris10_golden_common_all));
		WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
		if (adev->pdev->revision == 0xc7 &&
		    ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
		     (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) ||
		     (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1682))) {
			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
		}
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							cz_golden_settings_a11,
							ARRAY_SIZE(cz_golden_settings_a11));
		amdgpu_device_program_register_sequence(adev,
							cz_golden_common_all,
							ARRAY_SIZE(cz_golden_common_all));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							stoney_golden_settings_a11,
							ARRAY_SIZE(stoney_golden_settings_a11));
		amdgpu_device_program_register_sequence(adev,
							stoney_golden_common_all,
							ARRAY_SIZE(stoney_golden_common_all));
		break;
	default:
		break;
	}
}

static void gfx_v8_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
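/*
 * Basic CP sanity tests. The ring test writes 0xDEADBEEF to a scratch
 * register through the ring with a SET_UCONFIG_REG packet and polls until
 * the CP has committed it (or the timeout expires). The IB test does the
 * same through an indirect buffer, targeting a writeback (WB) slot in
 * memory instead of a scratch register.
 */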
static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r)
		return r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		goto error_free_scratch;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_scratch:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}

static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned int index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 16, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}
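/*
 * Microcode handling. gfx_v8_0_init_microcode() fetches the PFP, ME, CE,
 * RLC, MEC and (where the part has one) MEC2 images for the detected
 * ASIC; Polaris parts first try the newer "_2.bin" variants and fall
 * back to the original names on -ENOENT. gfx_v8_0_free_microcode()
 * releases everything the init path acquired.
 */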
static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	if ((adev->asic_type != CHIP_STONEY) &&
	    (adev->asic_type != CHIP_TOPAZ))
		release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL, i;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_CARRIZO:
		chip_name = "carrizo";
		break;
	case CHIP_FIJI:
		chip_name = "fiji";
		break;
	case CHIP_STONEY:
		chip_name = "stoney";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS12:
		chip_name = "polaris12";
		break;
	case CHIP_VEGAM:
		chip_name = "vegam";
		break;
	default:
		BUG();
	}

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name);
		err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
		if (err == -ENOENT) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
			err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
		err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	}
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name);
		err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
		if (err == -ENOENT) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
			err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
		err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	}
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name);
		err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
		if (err == -ENOENT) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
			err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
		err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	}
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	/*
	 * Support for MCBP/Virtualization in combination with chained IBs was
	 * formally released in feature version #46.
	 */
	if (adev->gfx.ce_feature_version >= 46 &&
	    adev->gfx.pfp_feature_version >= 46) {
		adev->virt.chained_ib_support = true;
		DRM_INFO("Chained IB support enabled!\n");
	} else
		adev->virt.chained_ib_support = false;
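	/*
	 * The RLC image uses the v2.0 firmware header, which carries the
	 * save/restore and register-list layout metadata parsed below in
	 * addition to the raw ucode.
	 */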
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);

	adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);

	adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);

	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name);
		err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
		if (err == -ENOENT) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
			err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
		}
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
		err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	}
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	if ((adev->asic_type != CHIP_STONEY) &&
	    (adev->asic_type != CHIP_TOPAZ)) {
		if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name);
			err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
			if (err == -ENOENT) {
				snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
				err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
			}
		} else {
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
			err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
		}
		if (!err) {
			err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
			if (err)
				goto out;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)
				adev->gfx.mec2_fw->data;
			adev->gfx.mec2_fw_version =
				le32_to_cpu(cp_hdr->header.ucode_version);
			adev->gfx.mec2_feature_version =
				le32_to_cpu(cp_hdr->ucode_feature_version);
		} else {
			err = 0;
			adev->gfx.mec2_fw = NULL;
		}
	}
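	/*
	 * Register every successfully loaded image in adev->firmware.ucode[]
	 * and grow the total firmware-size bookkeeping; this table is what
	 * the firmware-assisted (e.g. SMU-managed) loading path consumes.
	 */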
	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
	info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
	info->fw = adev->gfx.pfp_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
	info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
	info->fw = adev->gfx.me_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
	info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
	info->fw = adev->gfx.ce_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
	info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
	info->fw = adev->gfx.rlc_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
	info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
	info->fw = adev->gfx.mec_fw;
	header = (const struct common_firmware_header *)info->fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	/* we also need to account for the MEC jump table (JT) */
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);

	if (amdgpu_sriov_vf(adev)) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
		info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
	}

	if (adev->gfx.mec2_fw) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
		info->fw = adev->gfx.mec2_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx8: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}
	return err;
}
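/*
 * Build the clear-state buffer (CSB): a PM4 stream that replays the
 * SECT_CONTEXT register extents from the VI clear-state tables between
 * PREAMBLE_BEGIN/END_CLEAR_STATE markers and finishes with a CLEAR_STATE
 * packet.
 */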
static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
			PACKET3_SET_CONTEXT_REG_START);
	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_CARRIZO)
		return 5;
	else
		return 4;
}

static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = vi_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	if ((adev->asic_type == CHIP_CARRIZO) ||
	    (adev->asic_type == CHIP_STONEY)) {
		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
		r = amdgpu_gfx_rlc_init_cpt(adev);
		if (r)
			return r;
	}

	return 0;
}

static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
}

static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);

	mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;

	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
		return r;
	}

	memset(hpd, 0, mec_hpd_size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	return 0;
}
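/*
 * Hand-assembled GCN shader binaries and dispatch register state used by
 * gfx_v8_0_do_edc_gpr_workarounds() further below. The first blob writes
 * every VGPR, the second writes the SGPRs; both end with the well-known
 * s_barrier (0xbf8a0000) / s_endpgm (0xbf810000) encodings.
 */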
static const u32 vgpr_init_compute_shader[] =
{
	0x7e000209, 0x7e020208,
	0x7e040207, 0x7e060206,
	0x7e080205, 0x7e0a0204,
	0x7e0c0203, 0x7e0e0202,
	0x7e100201, 0x7e120200,
	0x7e140209, 0x7e160208,
	0x7e180207, 0x7e1a0206,
	0x7e1c0205, 0x7e1e0204,
	0x7e200203, 0x7e220202,
	0x7e240201, 0x7e260200,
	0x7e280209, 0x7e2a0208,
	0x7e2c0207, 0x7e2e0206,
	0x7e300205, 0x7e320204,
	0x7e340203, 0x7e360202,
	0x7e380201, 0x7e3a0200,
	0x7e3c0209, 0x7e3e0208,
	0x7e400207, 0x7e420206,
	0x7e440205, 0x7e460204,
	0x7e480203, 0x7e4a0202,
	0x7e4c0201, 0x7e4e0200,
	0x7e500209, 0x7e520208,
	0x7e540207, 0x7e560206,
	0x7e580205, 0x7e5a0204,
	0x7e5c0203, 0x7e5e0202,
	0x7e600201, 0x7e620200,
	0x7e640209, 0x7e660208,
	0x7e680207, 0x7e6a0206,
	0x7e6c0205, 0x7e6e0204,
	0x7e700203, 0x7e720202,
	0x7e740201, 0x7e760200,
	0x7e780209, 0x7e7a0208,
	0x7e7c0207, 0x7e7e0206,
	0xbf8a0000, 0xbf810000,
};

static const u32 sgpr_init_compute_shader[] =
{
	0xbe8a0100, 0xbe8c0102,
	0xbe8e0104, 0xbe900106,
	0xbe920108, 0xbe940100,
	0xbe960102, 0xbe980104,
	0xbe9a0106, 0xbe9c0108,
	0xbe9e0100, 0xbea00102,
	0xbea20104, 0xbea40106,
	0xbea60108, 0xbea80100,
	0xbeaa0102, 0xbeac0104,
	0xbeae0106, 0xbeb00108,
	0xbeb20100, 0xbeb40102,
	0xbeb60104, 0xbeb80106,
	0xbeba0108, 0xbebc0100,
	0xbebe0102, 0xbec00104,
	0xbec20106, 0xbec40108,
	0xbec60100, 0xbec80102,
	0xbee60004, 0xbee70005,
	0xbeea0006, 0xbeeb0007,
	0xbee80008, 0xbee90009,
	0xbefc0000, 0xbf8a0000,
	0xbf810000, 0x00000000,
};

static const u32 vgpr_init_regs[] =
{
	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
	mmCOMPUTE_NUM_THREAD_X, 256*4,
	mmCOMPUTE_NUM_THREAD_Y, 1,
	mmCOMPUTE_NUM_THREAD_Z, 1,
	mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
	mmCOMPUTE_PGM_RSRC2, 20,
	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};

static const u32 sgpr1_init_regs[] =
{
	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
	mmCOMPUTE_NUM_THREAD_X, 256*5,
	mmCOMPUTE_NUM_THREAD_Y, 1,
	mmCOMPUTE_NUM_THREAD_Z, 1,
	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
	mmCOMPUTE_PGM_RSRC2, 20,
	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};

static const u32 sgpr2_init_regs[] =
{
	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xf0,
	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
	mmCOMPUTE_NUM_THREAD_X, 256*5,
	mmCOMPUTE_NUM_THREAD_Y, 1,
	mmCOMPUTE_NUM_THREAD_Z, 1,
	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
	mmCOMPUTE_PGM_RSRC2, 20,
	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};
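/*
 * SEC/DED (single-error-correct / double-error-detect) counter registers
 * associated with the EDC workaround sequence.
 */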
static const u32 sec_ded_counter_registers[] =
{
	mmCPC_EDC_ATC_CNT,
	mmCPC_EDC_SCRATCH_CNT,
	mmCPC_EDC_UCODE_CNT,
	mmCPF_EDC_ATC_CNT,
	mmCPF_EDC_ROQ_CNT,
	mmCPF_EDC_TAG_CNT,
	mmCPG_EDC_ATC_CNT,
	mmCPG_EDC_DMA_CNT,
	mmCPG_EDC_TAG_CNT,
	mmDC_EDC_CSINVOC_CNT,
	mmDC_EDC_RESTORE_CNT,
	mmDC_EDC_STATE_CNT,
	mmGDS_EDC_CNT,
	mmGDS_EDC_GRBM_CNT,
	mmGDS_EDC_OA_DED,
	mmSPI_EDC_CNT,
	mmSQC_ATC_EDC_GATCL1_CNT,
	mmSQC_EDC_CNT,
	mmSQ_EDC_DED_CNT,
	mmSQ_EDC_INFO,
	mmSQ_EDC_SEC_CNT,
	mmTCC_EDC_CNT,
	mmTCP_ATC_EDC_GATCL1_CNT,
	mmTCP_EDC_CNT,
	mmTD_EDC_CNT
};
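/*
 * Carrizo-only EDC workaround: build one IB that dispatches the VGPR-init
 * shader and both SGPR-init shaders above, so every GPR bank is written
 * once and the EDC logic is left in a defined state before the counters
 * are trusted.
 */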
- PACKET3_SET_SH_REG_START; 1566 ib.ptr[ib.length_dw++] = vgpr_init_regs[i + 1]; 1567 } 1568 /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI (these hold the 256-byte-aligned shader address, hence the >> 8) */ 1569 gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8; 1570 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2); 1571 ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START; 1572 ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr); 1573 ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr); 1574 1575 /* write dispatch packet */ 1576 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); 1577 ib.ptr[ib.length_dw++] = 8; /* x */ 1578 ib.ptr[ib.length_dw++] = 1; /* y */ 1579 ib.ptr[ib.length_dw++] = 1; /* z */ 1580 ib.ptr[ib.length_dw++] = 1581 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1); 1582 1583 /* write CS partial flush packet */ 1584 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0); 1585 ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4); 1586 1587 /* SGPR1 */ 1588 /* write the register state for the compute dispatch */ 1589 for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i += 2) { 1590 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); 1591 ib.ptr[ib.length_dw++] = sgpr1_init_regs[i] - PACKET3_SET_SH_REG_START; 1592 ib.ptr[ib.length_dw++] = sgpr1_init_regs[i + 1]; 1593 } 1594 /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */ 1595 gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8; 1596 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2); 1597 ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START; 1598 ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr); 1599 ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr); 1600 1601 /* write dispatch packet */ 1602 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); 1603 ib.ptr[ib.length_dw++] = 8; /* x */ 1604 ib.ptr[ib.length_dw++] = 1; /* y */ 1605 ib.ptr[ib.length_dw++] = 1; /* z */ 1606 ib.ptr[ib.length_dw++] = 1607 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1); 1608 1609 /* write CS partial flush packet */ 1610 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0); 1611 ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4); 1612 1613 /* SGPR2 */ 1614 /* write the register state for the compute dispatch */ 1615 for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i += 2) { 1616 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); 1617 ib.ptr[ib.length_dw++] = sgpr2_init_regs[i] - PACKET3_SET_SH_REG_START; 1618 ib.ptr[ib.length_dw++] = sgpr2_init_regs[i + 1]; 1619 } 1620 /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */ 1621 gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8; 1622 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2); 1623 ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START; 1624 ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr); 1625 ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr); 1626 1627 /* write dispatch packet */ 1628 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); 1629 ib.ptr[ib.length_dw++] = 8; /* x */ 1630 ib.ptr[ib.length_dw++] = 1; /* y */ 1631 ib.ptr[ib.length_dw++] = 1; /* z */ 1632 ib.ptr[ib.length_dw++] = 1633 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1); 1634 1635 /* write CS partial flush packet */ 1636 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0); 1637 ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4); 1638 1639 /* schedule the ib on the ring */ 1640 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); 1641 if (r) { 1642 DRM_ERROR("amdgpu:
ib submit failed (%d).\n", r); 1643 goto fail; 1644 } 1645 1646 /* wait for the GPU to finish processing the IB */ 1647 r = dma_fence_wait(f, false); 1648 if (r) { 1649 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 1650 goto fail; 1651 } 1652 1653 tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, DED_MODE, 2); 1654 tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, PROP_FED, 1); 1655 WREG32(mmGB_EDC_MODE, tmp); 1656 1657 tmp = RREG32(mmCC_GC_EDC_CONFIG); 1658 tmp = REG_SET_FIELD(tmp, CC_GC_EDC_CONFIG, DIS_EDC, 0) | 1; 1659 WREG32(mmCC_GC_EDC_CONFIG, tmp); 1660 1661 1662 /* read back registers to clear the counters */ 1663 for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) 1664 RREG32(sec_ded_counter_registers[i]); 1665 1666 fail: 1667 amdgpu_ib_free(adev, &ib, NULL); 1668 dma_fence_put(f); 1669 1670 return r; 1671 } 1672 1673 static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) 1674 { 1675 u32 gb_addr_config; 1676 u32 mc_shared_chmap, mc_arb_ramcfg; 1677 u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map; 1678 u32 tmp; 1679 int ret; 1680 1681 switch (adev->asic_type) { 1682 case CHIP_TOPAZ: 1683 adev->gfx.config.max_shader_engines = 1; 1684 adev->gfx.config.max_tile_pipes = 2; 1685 adev->gfx.config.max_cu_per_sh = 6; 1686 adev->gfx.config.max_sh_per_se = 1; 1687 adev->gfx.config.max_backends_per_se = 2; 1688 adev->gfx.config.max_texture_channel_caches = 2; 1689 adev->gfx.config.max_gprs = 256; 1690 adev->gfx.config.max_gs_threads = 32; 1691 adev->gfx.config.max_hw_contexts = 8; 1692 1693 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1694 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1695 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 1696 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 1697 gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN; 1698 break; 1699 case CHIP_FIJI: 1700 adev->gfx.config.max_shader_engines = 4; 1701 adev->gfx.config.max_tile_pipes = 16; 1702 adev->gfx.config.max_cu_per_sh = 16; 1703 adev->gfx.config.max_sh_per_se = 1; 1704 adev->gfx.config.max_backends_per_se = 4; 1705 adev->gfx.config.max_texture_channel_caches = 16; 1706 adev->gfx.config.max_gprs = 256; 1707 adev->gfx.config.max_gs_threads = 32; 1708 adev->gfx.config.max_hw_contexts = 8; 1709 1710 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1711 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1712 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 1713 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 1714 gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN; 1715 break; 1716 case CHIP_POLARIS11: 1717 case CHIP_POLARIS12: 1718 ret = amdgpu_atombios_get_gfx_info(adev); 1719 if (ret) 1720 return ret; 1721 adev->gfx.config.max_gprs = 256; 1722 adev->gfx.config.max_gs_threads = 32; 1723 adev->gfx.config.max_hw_contexts = 8; 1724 1725 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1726 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1727 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 1728 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 1729 gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN; 1730 break; 1731 case CHIP_POLARIS10: 1732 case CHIP_VEGAM: 1733 ret = amdgpu_atombios_get_gfx_info(adev); 1734 if (ret) 1735 return ret; 1736 adev->gfx.config.max_gprs = 256; 1737 adev->gfx.config.max_gs_threads = 32; 1738 adev->gfx.config.max_hw_contexts = 8; 1739 1740 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1741 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1742 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 1743 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 1744 
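/* Note (added for clarity): POLARIS10 and VEGAM reuse the Tonga golden GB_ADDR_CONFIG value below. The golden value only seeds gb_addr_config; its ROW_SIZE field is corrected later in this function (see the "fix up row size" switch) once the actual memory row size is known. */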
gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN; 1745 break; 1746 case CHIP_TONGA: 1747 adev->gfx.config.max_shader_engines = 4; 1748 adev->gfx.config.max_tile_pipes = 8; 1749 adev->gfx.config.max_cu_per_sh = 8; 1750 adev->gfx.config.max_sh_per_se = 1; 1751 adev->gfx.config.max_backends_per_se = 2; 1752 adev->gfx.config.max_texture_channel_caches = 8; 1753 adev->gfx.config.max_gprs = 256; 1754 adev->gfx.config.max_gs_threads = 32; 1755 adev->gfx.config.max_hw_contexts = 8; 1756 1757 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1758 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1759 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 1760 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 1761 gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN; 1762 break; 1763 case CHIP_CARRIZO: 1764 adev->gfx.config.max_shader_engines = 1; 1765 adev->gfx.config.max_tile_pipes = 2; 1766 adev->gfx.config.max_sh_per_se = 1; 1767 adev->gfx.config.max_backends_per_se = 2; 1768 adev->gfx.config.max_cu_per_sh = 8; 1769 adev->gfx.config.max_texture_channel_caches = 2; 1770 adev->gfx.config.max_gprs = 256; 1771 adev->gfx.config.max_gs_threads = 32; 1772 adev->gfx.config.max_hw_contexts = 8; 1773 1774 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1775 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1776 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 1777 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 1778 gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN; 1779 break; 1780 case CHIP_STONEY: 1781 adev->gfx.config.max_shader_engines = 1; 1782 adev->gfx.config.max_tile_pipes = 2; 1783 adev->gfx.config.max_sh_per_se = 1; 1784 adev->gfx.config.max_backends_per_se = 1; 1785 adev->gfx.config.max_cu_per_sh = 3; 1786 adev->gfx.config.max_texture_channel_caches = 2; 1787 adev->gfx.config.max_gprs = 256; 1788 adev->gfx.config.max_gs_threads = 16; 1789 adev->gfx.config.max_hw_contexts = 8; 1790 1791 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1792 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1793 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 1794 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 1795 gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN; 1796 break; 1797 default: 1798 adev->gfx.config.max_shader_engines = 2; 1799 adev->gfx.config.max_tile_pipes = 4; 1800 adev->gfx.config.max_cu_per_sh = 2; 1801 adev->gfx.config.max_sh_per_se = 1; 1802 adev->gfx.config.max_backends_per_se = 2; 1803 adev->gfx.config.max_texture_channel_caches = 4; 1804 adev->gfx.config.max_gprs = 256; 1805 adev->gfx.config.max_gs_threads = 32; 1806 adev->gfx.config.max_hw_contexts = 8; 1807 1808 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 1809 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 1810 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 1811 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 1812 gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN; 1813 break; 1814 } 1815 1816 mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); 1817 adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); 1818 mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; 1819 1820 adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; 1821 adev->gfx.config.mem_max_burst_length_bytes = 256; 1822 if (adev->flags & AMD_IS_APU) { 1823 /* Get memory bank mapping mode. 
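The DIMM*ADDRMAP fuse fields read below describe the bank address mapping of each DIMM; values outside the supported range (0, 3, 4, or anything above 12) are cleared so that a missing or mis-fused DIMM cannot skew the row-size choice, and a map value of 11 (the 8GB mapping, per the comment below) forces a 2KB row size.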
*/ 1824 tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING); 1825 dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP); 1826 dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP); 1827 1828 tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING); 1829 dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP); 1830 dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP); 1831 1832 /* Validate settings in case only one DIMM is installed. */ 1833 if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12)) 1834 dimm00_addr_map = 0; 1835 if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12)) 1836 dimm01_addr_map = 0; 1837 if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12)) 1838 dimm10_addr_map = 0; 1839 if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12)) 1840 dimm11_addr_map = 0; 1841 1842 /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */ 1843 /* If ROW size(DIMM1) != ROW size(DIMM0), use the larger ROW size. */ 1844 if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11)) 1845 adev->gfx.config.mem_row_size_in_kb = 2; 1846 else 1847 adev->gfx.config.mem_row_size_in_kb = 1; 1848 } else { 1849 tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS); /* row size = 4 * 2^(8 + NOOFCOLS) bytes: NOOFCOLS 0 -> 1KB, 1 -> 2KB, 2 -> 4KB, capped at 4KB below */ 1850 adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; 1851 if (adev->gfx.config.mem_row_size_in_kb > 4) 1852 adev->gfx.config.mem_row_size_in_kb = 4; 1853 } 1854 1855 adev->gfx.config.shader_engine_tile_size = 32; 1856 adev->gfx.config.num_gpus = 1; 1857 adev->gfx.config.multi_gpu_tile_size = 64; 1858 1859 /* fix up row size */ 1860 switch (adev->gfx.config.mem_row_size_in_kb) { 1861 case 1: 1862 default: 1863 gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0); 1864 break; 1865 case 2: 1866 gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1); 1867 break; 1868 case 4: 1869 gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2); 1870 break; 1871 } 1872 adev->gfx.config.gb_addr_config = gb_addr_config; 1873 1874 return 0; 1875 } 1876 1877 static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, 1878 int mec, int pipe, int queue) 1879 { 1880 int r; 1881 unsigned irq_type; 1882 struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; 1883 1884 1885 1886 /* mec0 is me1 */ 1887 ring->me = mec + 1; 1888 ring->pipe = pipe; 1889 ring->queue = queue; 1890 1891 ring->ring_obj = NULL; 1892 ring->use_doorbell = true; 1893 ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id; /* each ring owns one GFX8_MEC_HPD_SIZE slice of the MEC HPD/EOP buffer */ 1894 ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr 1895 + (ring_id * GFX8_MEC_HPD_SIZE); 1896 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); 1897 1898 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP 1899 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) 1900 + ring->pipe; 1901 1902 /* type-2 packets are deprecated on MEC, use type-3 instead */ 1903 r = amdgpu_ring_init(adev, ring, 1024, 1904 &adev->gfx.eop_irq, irq_type); 1905 if (r) 1906 return r; 1907 1908 1909 return 0; 1910 } 1911 1912 static void gfx_v8_0_sq_irq_work_func(struct work_struct *work); 1913 1914 static int gfx_v8_0_sw_init(void *handle) 1915 { 1916 int
i, j, k, r, ring_id; 1917 struct amdgpu_ring *ring; 1918 struct amdgpu_kiq *kiq; 1919 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1920 1921 switch (adev->asic_type) { 1922 case CHIP_TONGA: 1923 case CHIP_CARRIZO: 1924 case CHIP_FIJI: 1925 case CHIP_POLARIS10: 1926 case CHIP_POLARIS11: 1927 case CHIP_POLARIS12: 1928 case CHIP_VEGAM: 1929 adev->gfx.mec.num_mec = 2; 1930 break; 1931 case CHIP_TOPAZ: 1932 case CHIP_STONEY: 1933 default: 1934 adev->gfx.mec.num_mec = 1; 1935 break; 1936 } 1937 1938 adev->gfx.mec.num_pipe_per_mec = 4; 1939 adev->gfx.mec.num_queue_per_pipe = 8; 1940 1941 /* EOP Event */ 1942 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq); 1943 if (r) 1944 return r; 1945 1946 /* Privileged reg */ 1947 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT, 1948 &adev->gfx.priv_reg_irq); 1949 if (r) 1950 return r; 1951 1952 /* Privileged inst */ 1953 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT, 1954 &adev->gfx.priv_inst_irq); 1955 if (r) 1956 return r; 1957 1958 /* Add CP EDC/ECC irq */ 1959 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR, 1960 &adev->gfx.cp_ecc_error_irq); 1961 if (r) 1962 return r; 1963 1964 /* SQ interrupts. */ 1965 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG, 1966 &adev->gfx.sq_irq); 1967 if (r) { 1968 DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r); 1969 return r; 1970 } 1971 1972 INIT_WORK(&adev->gfx.sq_work.work, gfx_v8_0_sq_irq_work_func); 1973 1974 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; 1975 1976 gfx_v8_0_scratch_init(adev); 1977 1978 r = gfx_v8_0_init_microcode(adev); 1979 if (r) { 1980 DRM_ERROR("Failed to load gfx firmware!\n"); 1981 return r; 1982 } 1983 1984 r = adev->gfx.rlc.funcs->init(adev); 1985 if (r) { 1986 DRM_ERROR("Failed to init rlc BOs!\n"); 1987 return r; 1988 } 1989 1990 r = gfx_v8_0_mec_init(adev); 1991 if (r) { 1992 DRM_ERROR("Failed to init MEC BOs!\n"); 1993 return r; 1994 } 1995 1996 /* set up the gfx ring */ 1997 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 1998 ring = &adev->gfx.gfx_ring[i]; 1999 ring->ring_obj = NULL; 2000 sprintf(ring->name, "gfx"); 2001 /* no gfx doorbells on iceland */ 2002 if (adev->asic_type != CHIP_TOPAZ) { 2003 ring->use_doorbell = true; 2004 ring->doorbell_index = adev->doorbell_index.gfx_ring0; 2005 } 2006 2007 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, 2008 AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP); 2009 if (r) 2010 return r; 2011 } 2012 2013 2014 /* set up the compute queues - allocate horizontally across pipes */ 2015 ring_id = 0; 2016 for (i = 0; i < adev->gfx.mec.num_mec; ++i) { 2017 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { 2018 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 2019 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j)) 2020 continue; 2021 2022 r = gfx_v8_0_compute_ring_init(adev, 2023 ring_id, 2024 i, k, j); 2025 if (r) 2026 return r; 2027 2028 ring_id++; 2029 } 2030 } 2031 } 2032 2033 r = amdgpu_gfx_kiq_init(adev, GFX8_MEC_HPD_SIZE); 2034 if (r) { 2035 DRM_ERROR("Failed to init KIQ BOs!\n"); 2036 return r; 2037 } 2038 2039 kiq = &adev->gfx.kiq; 2040 r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); 2041 if (r) 2042 return r; 2043 2044 /* create MQD for all compute queues as well as KIQ for SRIOV case */ 2045 r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct 
vi_mqd_allocation)); 2046 if (r) 2047 return r; 2048 2049 adev->gfx.ce_ram_size = 0x8000; 2050 2051 r = gfx_v8_0_gpu_early_init(adev); 2052 if (r) 2053 return r; 2054 2055 return 0; 2056 } 2057 2058 static int gfx_v8_0_sw_fini(void *handle) 2059 { 2060 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2061 int i; 2062 2063 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 2064 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); 2065 for (i = 0; i < adev->gfx.num_compute_rings; i++) 2066 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); 2067 2068 amdgpu_gfx_mqd_sw_fini(adev); 2069 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); 2070 amdgpu_gfx_kiq_fini(adev); 2071 2072 gfx_v8_0_mec_fini(adev); 2073 amdgpu_gfx_rlc_fini(adev); 2074 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, 2075 &adev->gfx.rlc.clear_state_gpu_addr, 2076 (void **)&adev->gfx.rlc.cs_ptr); 2077 if ((adev->asic_type == CHIP_CARRIZO) || 2078 (adev->asic_type == CHIP_STONEY)) { 2079 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, 2080 &adev->gfx.rlc.cp_table_gpu_addr, 2081 (void **)&adev->gfx.rlc.cp_table_ptr); 2082 } 2083 gfx_v8_0_free_microcode(adev); 2084 2085 return 0; 2086 } 2087 2088 static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) 2089 { 2090 uint32_t *modearray, *mod2array; 2091 const u32 num_tile_mode_states = ARRAY_SIZE(adev->gfx.config.tile_mode_array); 2092 const u32 num_secondary_tile_mode_states = ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); 2093 u32 reg_offset; 2094 2095 modearray = adev->gfx.config.tile_mode_array; 2096 mod2array = adev->gfx.config.macrotile_mode_array; 2097 2098 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) 2099 modearray[reg_offset] = 0; 2100 2101 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) 2102 mod2array[reg_offset] = 0; 2103 2104 switch (adev->asic_type) { 2105 case CHIP_TOPAZ: 2106 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2107 PIPE_CONFIG(ADDR_SURF_P2) | 2108 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 2109 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2110 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2111 PIPE_CONFIG(ADDR_SURF_P2) | 2112 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 2113 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2114 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2115 PIPE_CONFIG(ADDR_SURF_P2) | 2116 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 2117 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2118 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2119 PIPE_CONFIG(ADDR_SURF_P2) | 2120 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 2121 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2122 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2123 PIPE_CONFIG(ADDR_SURF_P2) | 2124 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2125 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2126 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2127 PIPE_CONFIG(ADDR_SURF_P2) | 2128 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2129 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2130 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2131 PIPE_CONFIG(ADDR_SURF_P2) | 2132 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2133 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2134 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 2135 PIPE_CONFIG(ADDR_SURF_P2)); 2136 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2137 PIPE_CONFIG(ADDR_SURF_P2) | 2138 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2139 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2140 modearray[10] = 
(ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2141 PIPE_CONFIG(ADDR_SURF_P2) | 2142 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2143 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2144 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2145 PIPE_CONFIG(ADDR_SURF_P2) | 2146 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2147 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2148 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2149 PIPE_CONFIG(ADDR_SURF_P2) | 2150 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2151 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2152 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2153 PIPE_CONFIG(ADDR_SURF_P2) | 2154 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2155 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2156 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 2157 PIPE_CONFIG(ADDR_SURF_P2) | 2158 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2159 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2160 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2161 PIPE_CONFIG(ADDR_SURF_P2) | 2162 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2163 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2164 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 2165 PIPE_CONFIG(ADDR_SURF_P2) | 2166 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2167 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2168 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 2169 PIPE_CONFIG(ADDR_SURF_P2) | 2170 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2171 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2172 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 2173 PIPE_CONFIG(ADDR_SURF_P2) | 2174 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2175 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2176 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | 2177 PIPE_CONFIG(ADDR_SURF_P2) | 2178 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2179 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2180 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 2181 PIPE_CONFIG(ADDR_SURF_P2) | 2182 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2183 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2184 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 2185 PIPE_CONFIG(ADDR_SURF_P2) | 2186 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2187 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2188 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 2189 PIPE_CONFIG(ADDR_SURF_P2) | 2190 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2191 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2192 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 2193 PIPE_CONFIG(ADDR_SURF_P2) | 2194 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2195 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2196 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2197 PIPE_CONFIG(ADDR_SURF_P2) | 2198 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2199 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2200 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2201 PIPE_CONFIG(ADDR_SURF_P2) | 2202 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2203 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2204 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2205 PIPE_CONFIG(ADDR_SURF_P2) | 2206 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2207 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2208 2209 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | 2210 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2211 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 2212 NUM_BANKS(ADDR_SURF_8_BANK)); 2213 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | 2214 
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2215 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 2216 NUM_BANKS(ADDR_SURF_8_BANK)); 2217 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 2218 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2219 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 2220 NUM_BANKS(ADDR_SURF_8_BANK)); 2221 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2222 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2223 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 2224 NUM_BANKS(ADDR_SURF_8_BANK)); 2225 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2226 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 2227 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 2228 NUM_BANKS(ADDR_SURF_8_BANK)); 2229 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2230 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 2231 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 2232 NUM_BANKS(ADDR_SURF_8_BANK)); 2233 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2234 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 2235 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 2236 NUM_BANKS(ADDR_SURF_8_BANK)); 2237 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | 2238 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | 2239 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 2240 NUM_BANKS(ADDR_SURF_16_BANK)); 2241 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | 2242 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2243 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 2244 NUM_BANKS(ADDR_SURF_16_BANK)); 2245 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 2246 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2247 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 2248 NUM_BANKS(ADDR_SURF_16_BANK)); 2249 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 2250 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 2251 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 2252 NUM_BANKS(ADDR_SURF_16_BANK)); 2253 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2254 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 2255 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 2256 NUM_BANKS(ADDR_SURF_16_BANK)); 2257 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2258 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 2259 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 2260 NUM_BANKS(ADDR_SURF_16_BANK)); 2261 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2262 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 2263 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 2264 NUM_BANKS(ADDR_SURF_8_BANK)); 2265 2266 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) 2267 if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 && 2268 reg_offset != 23) 2269 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]); 2270 2271 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) 2272 if (reg_offset != 7) 2273 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]); 2274 2275 break; 2276 case CHIP_FIJI: 2277 case CHIP_VEGAM: 2278 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2279 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2280 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 2281 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2282 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2283 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2284 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 2285 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2286 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2287 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2288 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 2289 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2290 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2291 
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2292 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 2293 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2294 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2295 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2296 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2297 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2298 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2299 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2300 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2301 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2302 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2303 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2304 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2305 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2306 modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2307 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2308 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2309 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2310 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 2311 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); 2312 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2313 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2314 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2315 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2316 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2317 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2318 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2319 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2320 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2321 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2322 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2323 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2324 modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2325 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2326 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2327 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2328 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2329 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2330 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2331 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2332 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2333 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2334 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2335 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2336 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 2337 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2338 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2339 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2340 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2341 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2342 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2343 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2344 modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2345 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2346 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2347 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2348 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 2349 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2350 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2351 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2352 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 2353 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2354 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2355 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2356 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 2357 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2358 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2359 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2360 modearray[21] = 
(ARRAY_MODE(ARRAY_3D_TILED_THICK) | 2361 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2362 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2363 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2364 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 2365 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2366 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2367 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2368 modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 2369 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2370 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2371 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2372 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 2373 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2374 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2375 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2376 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 2377 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2378 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2379 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2380 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 2381 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2382 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2383 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2384 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2385 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2386 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2387 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2388 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2389 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2390 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2391 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2392 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2393 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 2394 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2395 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2396 modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2397 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2398 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2399 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2400 2401 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2402 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2403 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 2404 NUM_BANKS(ADDR_SURF_8_BANK)); 2405 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2406 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2407 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 2408 NUM_BANKS(ADDR_SURF_8_BANK)); 2409 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2410 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2411 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 2412 NUM_BANKS(ADDR_SURF_8_BANK)); 2413 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2414 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2415 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 2416 NUM_BANKS(ADDR_SURF_8_BANK)); 2417 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2418 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 2419 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 2420 NUM_BANKS(ADDR_SURF_8_BANK)); 2421 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2422 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 2423 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 2424 NUM_BANKS(ADDR_SURF_8_BANK)); 2425 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2426 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 2427 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 2428 NUM_BANKS(ADDR_SURF_8_BANK)); 2429 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2430 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | 2431 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 2432 NUM_BANKS(ADDR_SURF_8_BANK)); 2433 
mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2434 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2435 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 2436 NUM_BANKS(ADDR_SURF_8_BANK)); 2437 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2438 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 2439 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 2440 NUM_BANKS(ADDR_SURF_8_BANK)); 2441 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2442 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 2443 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 2444 NUM_BANKS(ADDR_SURF_8_BANK)); 2445 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2446 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 2447 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 2448 NUM_BANKS(ADDR_SURF_8_BANK)); 2449 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2450 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 2451 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 2452 NUM_BANKS(ADDR_SURF_8_BANK)); 2453 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2454 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 2455 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 2456 NUM_BANKS(ADDR_SURF_4_BANK)); 2457 2458 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) 2459 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]); 2460 2461 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) 2462 if (reg_offset != 7) 2463 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]); 2464 2465 break; 2466 case CHIP_TONGA: 2467 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2468 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2469 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 2470 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2471 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2472 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2473 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 2474 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2475 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2476 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2477 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 2478 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2479 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2480 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2481 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 2482 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2483 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2484 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2485 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2486 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2487 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2488 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2489 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2490 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2491 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2492 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2493 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2494 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2495 modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2496 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2497 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2498 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2499 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 2500 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16)); 2501 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2502 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2503 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2504 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2505 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2506 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2507 
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2508 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2509 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2510 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2511 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2512 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2513 modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2514 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2515 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2516 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2517 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2518 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2519 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2520 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2521 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2522 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2523 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2524 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2525 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 2526 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2527 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2528 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2529 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2530 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2531 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2532 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2533 modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2534 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2535 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2536 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2537 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 2538 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2539 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2540 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2541 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 2542 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2543 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2544 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2545 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 2546 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2547 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2548 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2549 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | 2550 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2551 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2552 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2553 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 2554 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2555 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2556 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2557 modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 2558 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2559 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2560 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2561 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 2562 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2563 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2564 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2565 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 2566 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2567 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2568 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2569 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 2570 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2571 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2572 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2573 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2574 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2575 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2576 
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2577 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2578 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2579 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2580 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2581 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2582 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | 2583 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2584 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2585 modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2586 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2587 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2588 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2589 2590 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2591 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2592 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 2593 NUM_BANKS(ADDR_SURF_16_BANK)); 2594 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2595 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2596 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 2597 NUM_BANKS(ADDR_SURF_16_BANK)); 2598 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2599 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2600 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 2601 NUM_BANKS(ADDR_SURF_16_BANK)); 2602 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2603 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2604 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 2605 NUM_BANKS(ADDR_SURF_16_BANK)); 2606 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2607 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 2608 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 2609 NUM_BANKS(ADDR_SURF_16_BANK)); 2610 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2611 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 2612 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 2613 NUM_BANKS(ADDR_SURF_16_BANK)); 2614 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2615 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 2616 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 2617 NUM_BANKS(ADDR_SURF_16_BANK)); 2618 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2619 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | 2620 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 2621 NUM_BANKS(ADDR_SURF_16_BANK)); 2622 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2623 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2624 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 2625 NUM_BANKS(ADDR_SURF_16_BANK)); 2626 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2627 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 2628 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 2629 NUM_BANKS(ADDR_SURF_16_BANK)); 2630 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2631 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 2632 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 2633 NUM_BANKS(ADDR_SURF_16_BANK)); 2634 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2635 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 2636 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 2637 NUM_BANKS(ADDR_SURF_8_BANK)); 2638 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2639 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 2640 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 2641 NUM_BANKS(ADDR_SURF_4_BANK)); 2642 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2643 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 2644 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 2645 NUM_BANKS(ADDR_SURF_4_BANK)); 2646 2647 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) 2648 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]); 2649 2650 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) 2651 if 
(reg_offset != 7) 2652 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]); 2653 2654 break; 2655 case CHIP_POLARIS11: 2656 case CHIP_POLARIS12: 2657 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2658 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2659 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 2660 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2661 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2662 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2663 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 2664 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2665 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2666 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2667 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 2668 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2669 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2670 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2671 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 2672 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2673 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2674 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2675 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2676 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2677 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2678 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2679 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2680 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2681 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2682 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2683 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2684 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2685 modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2686 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2687 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | 2688 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 2689 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 2690 PIPE_CONFIG(ADDR_SURF_P4_16x16)); 2691 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2692 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2693 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2694 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2695 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2696 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2697 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2698 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2699 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2700 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2701 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2702 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2703 modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2704 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2705 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 2706 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2707 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2708 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2709 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2710 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2711 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2712 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2713 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2714 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2715 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 2716 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2717 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2718 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2719 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2720 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2721 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2722 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2723 modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2724 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2725 
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2726 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2727 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 2728 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2729 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2730 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2731 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 2732 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2733 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2734 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2735 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 2736 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2737 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2738 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2739 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | 2740 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2741 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2742 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2743 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 2744 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2745 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2746 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2747 modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 2748 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2749 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2750 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2751 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 2752 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2753 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 2754 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2755 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 2756 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2757 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2758 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2759 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 2760 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2761 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 2762 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 2763 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 2764 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2765 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2766 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2767 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2768 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2769 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2770 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 2771 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2772 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2773 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2774 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2775 modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 2776 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2777 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 2778 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 2779 2780 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2781 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2782 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 2783 NUM_BANKS(ADDR_SURF_16_BANK)); 2784 2785 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2786 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2787 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 2788 NUM_BANKS(ADDR_SURF_16_BANK)); 2789 2790 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2791 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 2792 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 2793 NUM_BANKS(ADDR_SURF_16_BANK)); 2794 2795 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2796 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 2797 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 2798 NUM_BANKS(ADDR_SURF_16_BANK)); 2799 2800 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 2801 
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));

		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_4_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);

		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);

		break;
	case CHIP_POLARIS10:
		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));

		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_16_BANK));

		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_8_BANK));

		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_4_BANK));

		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_4_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);

		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);

		break;
	case CHIP_STONEY:
		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
				PIPE_CONFIG(ADDR_SURF_P2));
		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));

		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
			    reg_offset != 23)
				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);

		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);

		break;
	default:
		dev_warn(adev->dev,
			 "Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n",
			 adev->asic_type);
		/* fall through */

	case CHIP_CARRIZO:
		modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
				PIPE_CONFIG(ADDR_SURF_P2));
		modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
				 PIPE_CONFIG(ADDR_SURF_P2) |
				 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
				 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));

		mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
			    reg_offset != 23)
				WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);

		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);

		break;
	}
}

static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
				  u32 se_num, u32 sh_num, u32 instance)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
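	/* 0xffffffff likewise requests broadcast for the SH selection below */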
	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

	WREG32(mmGRBM_GFX_INDEX, data);
}

static void gfx_v8_0_select_me_pipe_q(struct amdgpu_device *adev,
				      u32 me, u32 pipe, u32 q)
{
	vi_srbm_select(adev, me, pipe, q, 0);
}

static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32(mmCC_RB_BACKEND_DISABLE) |
		RREG32(mmGC_USER_RB_BACKEND_DISABLE);

	data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}

static void
gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_VEGAM:
		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
			  RB_XSEL2(1) | PKR_MAP(2) |
			  PKR_XSEL(1) | PKR_YSEL(1) |
			  SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3);
		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
			   SE_PAIR_YSEL(2);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
			  SE_XSEL(1) | SE_YSEL(1);
		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) |
			   SE_PAIR_YSEL(2);
		break;
	case CHIP_TOPAZ:
	case CHIP_CARRIZO:
		*rconf |= RB_MAP_PKR0(2);
		*rconf1 |= 0x0;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
			  SE_XSEL(1) | SE_YSEL(1);
		*rconf1 |= 0x0;
		break;
	case CHIP_STONEY:
		*rconf |= 0x0;
		*rconf1 |= 0x0;
		break;
	default:
		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
		break;
	}
}

static void
gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
					u32 raster_config, u32 raster_config_1,
					unsigned rb_mask, unsigned num_rb)
{
	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
	unsigned rb_per_se = num_rb / num_se;
	unsigned se_mask[4];
	unsigned se;

	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;

	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));

	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
			     (!se_mask[2] && !se_mask[3]))) {
		raster_config_1 &= ~SE_PAIR_MAP_MASK;

		if (!se_mask[0] && !se_mask[1]) {
			raster_config_1 |=
				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
		} else {
			raster_config_1 |=
				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
		}
	}

	for (se = 0; se < num_se; se++) {
		unsigned raster_config_se = raster_config;
		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
		int idx = (se / 2) * 2;

		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
			raster_config_se &= ~SE_MAP_MASK;
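			/* remap SE_MAP to whichever SE of the pair still has
			 * render backends enabled
			 */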
			if (!se_mask[idx]) {
				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
			} else {
				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
			}
		}

		pkr0_mask &= rb_mask;
		pkr1_mask &= rb_mask;
		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
			raster_config_se &= ~PKR_MAP_MASK;

			if (!pkr0_mask) {
				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
			} else {
				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
			}
		}

		if (rb_per_se >= 2) {
			unsigned rb0_mask = 1 << (se * rb_per_se);
			unsigned rb1_mask = rb0_mask << 1;

			rb0_mask &= rb_mask;
			rb1_mask &= rb_mask;
			if (!rb0_mask || !rb1_mask) {
				raster_config_se &= ~RB_MAP_PKR0_MASK;

				if (!rb0_mask) {
					raster_config_se |=
						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
				} else {
					raster_config_se |=
						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
				}
			}

			if (rb_per_se > 2) {
				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
				rb1_mask = rb0_mask << 1;
				rb0_mask &= rb_mask;
				rb1_mask &= rb_mask;
				if (!rb0_mask || !rb1_mask) {
					raster_config_se &= ~RB_MAP_PKR1_MASK;

					if (!rb0_mask) {
						raster_config_se |=
							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
					} else {
						raster_config_se |=
							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
					}
				}
			}
		}

		/* GRBM_GFX_INDEX has a different offset on VI */
		gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
	}

	/* GRBM_GFX_INDEX has a different offset on VI */
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
}

static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 raster_config = 0, raster_config_1 = 0;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
				     adev->gfx.config.max_sh_per_se;
	unsigned num_rb_pipes;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v8_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);

	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
			     adev->gfx.config.max_shader_engines, 16);

	gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1);

	if (!adev->gfx.config.backend_enable_mask ||
	    adev->gfx.config.num_rbs >= num_rb_pipes) {
		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
	} else {
		gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
							adev->gfx.config.backend_enable_mask,
							num_rb_pipes);
	}

	/* cache the values for userspace */
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
			adev->gfx.config.rb_config[i][j].rb_backend_disable =
				RREG32(mmCC_RB_BACKEND_DISABLE);
			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
			adev->gfx.config.rb_config[i][j].raster_config =
				RREG32(mmPA_SC_RASTER_CONFIG);
			adev->gfx.config.rb_config[i][j].raster_config_1 =
				RREG32(mmPA_SC_RASTER_CONFIG_1);
		}
	}
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * gfx_v8_0_init_compute_vmid - initialize the compute VMIDs
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize compute vmid sh_mem registers
 *
 */
#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)
static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 <<
			SH_MEM_CONFIG__ADDRESS_MODE__SHIFT |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
			MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
			SH_MEM_CONFIG__PRIVATE_ATC_MASK;

	mutex_lock(&adev->srbm_mutex);
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		vi_srbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
		WREG32(mmSH_MEM_APE1_BASE, 1);
		WREG32(mmSH_MEM_APE1_LIMIT, 0);
		WREG32(mmSH_MEM_BASES, sh_mem_bases);
	}
	vi_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void gfx_v8_0_config_init(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	default:
		adev->gfx.config.double_offchip_lds_buf = 1;
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		adev->gfx.config.double_offchip_lds_buf = 0;
		break;
	}
}

static void gfx_v8_0_constants_init(struct amdgpu_device *adev)
{
	u32 tmp, sh_static_mem_cfg;
	int i;

	WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF);
	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);

	gfx_v8_0_tiling_mode_table_init(adev);
	gfx_v8_0_setup_rb(adev);
	gfx_v8_0_get_cu_info(adev);
	gfx_v8_0_config_init(adev);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
					  SWIZZLE_ENABLE, 1);
	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
					  ELEMENT_SIZE, 1);
	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
					  INDEX_STRIDE, 3);
	WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);

	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
		vi_srbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		if (i == 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			WREG32(mmSH_MEM_CONFIG, tmp);
			WREG32(mmSH_MEM_BASES, 0);
		} else {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			WREG32(mmSH_MEM_CONFIG, tmp);
			tmp = adev->gmc.shared_aperture_start >> 48;
			WREG32(mmSH_MEM_BASES, tmp);
		}

		WREG32(mmSH_MEM_APE1_BASE, 1);
		WREG32(mmSH_MEM_APE1_LIMIT, 0);
	}
	vi_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	gfx_v8_0_init_compute_vmid(adev);

	mutex_lock(&adev->grbm_idx_mutex);
	/*
	 * making sure that the following register writes will be broadcast
	 * to all the shaders
	 */
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);

	WREG32(mmPA_SC_FIFO_SIZE,
	       (adev->gfx.config.sc_prim_fifo_size_frontend <<
		PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
	       (adev->gfx.config.sc_prim_fifo_size_backend <<
		PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
	       (adev->gfx.config.sc_hiz_tile_fifo_size <<
		PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
	       (adev->gfx.config.sc_earlyz_tile_fifo_size <<
		PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));

	tmp = RREG32(mmSPI_ARB_PRIORITY);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
	WREG32(mmSPI_ARB_PRIORITY, tmp);

	mutex_unlock(&adev->grbm_idx_mutex);

}

static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
			if (k == adev->usec_timeout) {
				gfx_v8_0_select_se_sh(adev, 0xffffffff,
						      0xffffffff, 0xffffffff);
				mutex_unlock(&adev->grbm_idx_mutex);
				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
					 i, j);
				return;
			}
		}
	}
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}

static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
					       bool enable)
{
	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);

	WREG32(mmCP_INT_CNTL_RING0, tmp);
}

static void gfx_v8_0_init_csb(struct amdgpu_device *adev)
{
	/* csib */
	WREG32(mmRLC_CSIB_ADDR_HI,
	       adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32(mmRLC_CSIB_ADDR_LO,
	       adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32(mmRLC_CSIB_LENGTH,
	       adev->gfx.rlc.clear_state_size);
}

static void gfx_v8_0_parse_ind_reg_list(int *register_list_format,
				int ind_offset,
				int list_size,
				int *unique_indices,
				int *indices_count,
				int max_indices,
				int *ind_start_offsets,
				int *offset_count,
				int max_offset)
{
	int indices;
	bool new_entry = true;

	for (; ind_offset < list_size; ind_offset++) {

		if (new_entry) {
			new_entry = false;
			ind_start_offsets[*offset_count] = ind_offset;
			*offset_count = *offset_count + 1;
			BUG_ON(*offset_count >= max_offset);
		}

		if (register_list_format[ind_offset] == 0xFFFFFFFF) {
			new_entry = true;
			continue;
		}

		ind_offset += 2;

		/* look for the matching index */
		for (indices = 0;
			indices < *indices_count;
			indices++) {
			if (unique_indices[indices] ==
			    register_list_format[ind_offset])
				break;
		}

		if (indices >= *indices_count) {
			unique_indices[*indices_count] =
				register_list_format[ind_offset];
			indices = *indices_count;
			*indices_count = *indices_count + 1;
			BUG_ON(*indices_count >= max_indices);
		}

		register_list_format[ind_offset] = indices;
	}
}

static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
{
	int i, temp, data;
	int unique_indices[] = {0, 0, 0, 0, 0, 0, 0, 0};
	int indices_count = 0;
	int indirect_start_offsets[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	int offset_count = 0;

	int list_size;
	unsigned int *register_list_format =
		kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
	if (!register_list_format)
		return -ENOMEM;
	memcpy(register_list_format, adev->gfx.rlc.register_list_format,
	       adev->gfx.rlc.reg_list_format_size_bytes);

	gfx_v8_0_parse_ind_reg_list(register_list_format,
				RLC_FormatDirectRegListLength,
				adev->gfx.rlc.reg_list_format_size_bytes >> 2,
				unique_indices,
				&indices_count,
				ARRAY_SIZE(unique_indices),
				indirect_start_offsets,
				&offset_count,
				ARRAY_SIZE(indirect_start_offsets));

	/* save and restore list */
	WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);

	WREG32(mmRLC_SRM_ARAM_ADDR, 0);
	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
		WREG32(mmRLC_SRM_ARAM_DATA, adev->gfx.rlc.register_restore[i]);

	/* indirect list */
	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_list_format_start);
	for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
		WREG32(mmRLC_GPM_SCRATCH_DATA, register_list_format[i]);

	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
	list_size = list_size >> 1;
	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_restore_list_size);
	WREG32(mmRLC_GPM_SCRATCH_DATA, list_size);

	/* starting offsets */
	WREG32(mmRLC_GPM_SCRATCH_ADDR,
	       adev->gfx.rlc.starting_offsets_start);
	for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
		WREG32(mmRLC_GPM_SCRATCH_DATA,
		       indirect_start_offsets[i]);

	/* unique indices */
	temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
	data = mmRLC_SRM_INDEX_CNTL_DATA_0;
	for (i = 0; i < ARRAY_SIZE(unique_indices); i++) {
		if (unique_indices[i] != 0) {
			WREG32(temp + i, unique_indices[i] & 0x3FFFF);
			WREG32(data + i, unique_indices[i] >> 20);
		}
	}
	kfree(register_list_format);

	return 0;
}

static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
{
	WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1);
}

static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);

	data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
	data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
	data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
	data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
	WREG32(mmRLC_PG_DELAY, data);

	WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
	WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);

}

static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
						bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0);
}

static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
						  bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0);
}

static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 0 : 1);
}

static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
{
	if ((adev->asic_type == CHIP_CARRIZO) ||
	    (adev->asic_type == CHIP_STONEY)) {
		gfx_v8_0_init_csb(adev);
		gfx_v8_0_init_save_restore_list(adev);
		gfx_v8_0_enable_save_restore_machine(adev);
		WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
		gfx_v8_0_init_power_gating(adev);
		WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
	} else if ((adev->asic_type == CHIP_POLARIS11) ||
		   (adev->asic_type == CHIP_POLARIS12) ||
		   (adev->asic_type == CHIP_VEGAM)) {
		gfx_v8_0_init_csb(adev);
		gfx_v8_0_init_save_restore_list(adev);
		gfx_v8_0_enable_save_restore_machine(adev);
		gfx_v8_0_init_power_gating(adev);
	}

}

static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
{
	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0);

	gfx_v8_0_enable_gui_idle_interrupt(adev, false);
	gfx_v8_0_wait_for_rlc_serdes(adev);
}

static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
{
	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
	udelay(50);

	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
	udelay(50);
}

static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
{
	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1);

	/* on Carrizo (APUs), the CP interrupt is only enabled after the CP is initialized */
	if (!(adev->flags & AMD_IS_APU))
		gfx_v8_0_enable_gui_idle_interrupt(adev, true);

	udelay(50);
}

static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		gfx_v8_0_init_csb(adev);
		return 0;
	}

	adev->gfx.rlc.funcs->stop(adev);
	adev->gfx.rlc.funcs->reset(adev);
	gfx_v8_0_init_pg(adev);
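	/* CSB, save/restore list and power gating are programmed; bring the RLC back up */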
	adev->gfx.rlc.funcs->start(adev);

	return 0;
}

static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	int i;
	u32 tmp = RREG32(mmCP_ME_CNTL);

	if (enable) {
		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0);
		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0);
		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
		tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			adev->gfx.gfx_ring[i].sched.ready = false;
	}
	WREG32(mmCP_ME_CNTL, tmp);
	udelay(50);
}

static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = vi_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}
	/* pa_sc_raster_config/pa_sc_raster_config1 */
	count += 4;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int r, i;

	/* init the CP */
	WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
	WREG32(mmCP_ENDIAN_SWAP, 0);
	WREG32(mmCP_DEVICE_ID, 1);

	gfx_v8_0_cp_gfx_enable(adev, true);

	r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* clear state buffer */
	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, 0x80000000);
	amdgpu_ring_write(ring, 0x80000000);

	for (sect = vi_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				amdgpu_ring_write(ring,
				       PACKET3(PACKET3_SET_CONTEXT_REG,
					       ext->reg_count));
				amdgpu_ring_write(ring,
				       ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					amdgpu_ring_write(ring, ext->extent[i]);
			}
		}
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	amdgpu_ring_write(ring, 0);

	/* init the CE partitions */
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
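	/* the two dwords that follow the SET_BASE packet, one per CE partition */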
	amdgpu_ring_write(ring, 0x8000);
	amdgpu_ring_write(ring, 0x8000);

	amdgpu_ring_commit(ring);

	return 0;
}

static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	u32 tmp;
	/* no gfx doorbells on iceland */
	if (adev->asic_type == CHIP_TOPAZ)
		return;

	tmp = RREG32(mmCP_RB_DOORBELL_CONTROL);

	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
	}

	WREG32(mmCP_RB_DOORBELL_CONTROL, tmp);

	if (adev->flags & AMD_IS_APU)
		return;

	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
			    DOORBELL_RANGE_LOWER,
			    adev->doorbell_index.gfx_ring0);
	WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);

	WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
	       CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
}

static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr, rptr_addr, wptr_gpu_addr;

	/* Set the write pointer delay */
	WREG32(mmCP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32(mmCP_RB_VMID, 0);

	/* Set ring buffer size */
	ring = &adev->gfx.gfx_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MTYPE, 3);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MIN_IB_AVAILSZ, 1);
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
#endif
	WREG32(mmCP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
	ring->wptr = 0;
	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));

	/* set the wb address whether it's enabled or not */
	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);

	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
	WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
	mdelay(1);
	WREG32(mmCP_RB0_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32(mmCP_RB0_BASE, rb_addr);
	WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));

	gfx_v8_0_set_cpg_door_bell(adev, ring);
	/* start the ring */
	amdgpu_ring_clear_ring(ring);
	gfx_v8_0_cp_gfx_start(adev);
	ring->sched.ready = true;

	return 0;
}

static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
	int i;

	if (enable) {
		WREG32(mmCP_MEC_CNTL, 0);
	} else {
		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			adev->gfx.compute_ring[i].sched.ready = false;
		adev->gfx.kiq.ring.sched.ready = false;
	}
	udelay(50);
}

/* KIQ functions */
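/**
 * gfx_v8_0_kiq_setting - register the KIQ with the RLC
 *
 * @ring: amdgpu_ring pointer for the KIQ
 *
 * Program RLC_CP_SCHEDULERS with the me/pipe/queue of the kernel
 * interface queue, then set its enable bit (0x80).
 */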
static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring) 4298 { 4299 uint32_t tmp; 4300 struct amdgpu_device *adev = ring->adev; 4301 4302 /* tell RLC which is KIQ queue */ 4303 tmp = RREG32(mmRLC_CP_SCHEDULERS); 4304 tmp &= 0xffffff00; 4305 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); 4306 WREG32(mmRLC_CP_SCHEDULERS, tmp); 4307 tmp |= 0x80; 4308 WREG32(mmRLC_CP_SCHEDULERS, tmp); 4309 } 4310 4311 static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev) 4312 { 4313 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; 4314 uint64_t queue_mask = 0; 4315 int r, i; 4316 4317 for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) { 4318 if (!test_bit(i, adev->gfx.mec.queue_bitmap)) 4319 continue; 4320 4321 /* This situation may be hit in the future if a new HW 4322 * generation exposes more than 64 queues. If so, the 4323 * definition of queue_mask needs updating */ 4324 if (WARN_ON(i >= (sizeof(queue_mask)*8))) { 4325 DRM_ERROR("Invalid KCQ enabled: %d\n", i); 4326 break; 4327 } 4328 4329 queue_mask |= (1ull << i); 4330 } 4331 4332 r = amdgpu_ring_alloc(kiq_ring, (8 * adev->gfx.num_compute_rings) + 8); 4333 if (r) { 4334 DRM_ERROR("Failed to lock KIQ (%d).\n", r); 4335 return r; 4336 } 4337 /* set resources */ 4338 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); 4339 amdgpu_ring_write(kiq_ring, 0); /* vmid_mask:0 queue_type:0 (KIQ) */ 4340 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ 4341 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ 4342 amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ 4343 amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ 4344 amdgpu_ring_write(kiq_ring, 0); /* oac mask */ 4345 amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ 4346 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4347 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; 4348 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); 4349 uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); 4350 4351 /* map queues */ 4352 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); 4353 /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/ 4354 amdgpu_ring_write(kiq_ring, 4355 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); 4356 amdgpu_ring_write(kiq_ring, 4357 PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index) | 4358 PACKET3_MAP_QUEUES_QUEUE(ring->queue) | 4359 PACKET3_MAP_QUEUES_PIPE(ring->pipe) | 4360 PACKET3_MAP_QUEUES_ME(ring->me == 1 ? 
0 : 1)); /* doorbell */ 4361 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr)); 4362 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr)); 4363 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); 4364 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); 4365 } 4366 4367 amdgpu_ring_commit(kiq_ring); 4368 4369 return 0; 4370 } 4371 4372 static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req) 4373 { 4374 int i, r = 0; 4375 4376 if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) { 4377 WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, req); 4378 for (i = 0; i < adev->usec_timeout; i++) { 4379 if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK)) 4380 break; 4381 udelay(1); 4382 } 4383 if (i == adev->usec_timeout) 4384 r = -ETIMEDOUT; 4385 } 4386 WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0); 4387 WREG32(mmCP_HQD_PQ_RPTR, 0); 4388 WREG32(mmCP_HQD_PQ_WPTR, 0); 4389 4390 return r; 4391 } 4392 4393 static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) 4394 { 4395 struct amdgpu_device *adev = ring->adev; 4396 struct vi_mqd *mqd = ring->mqd_ptr; 4397 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; 4398 uint32_t tmp; 4399 4400 mqd->header = 0xC0310800; 4401 mqd->compute_pipelinestat_enable = 0x00000001; 4402 mqd->compute_static_thread_mgmt_se0 = 0xffffffff; 4403 mqd->compute_static_thread_mgmt_se1 = 0xffffffff; 4404 mqd->compute_static_thread_mgmt_se2 = 0xffffffff; 4405 mqd->compute_static_thread_mgmt_se3 = 0xffffffff; 4406 mqd->compute_misc_reserved = 0x00000003; 4407 mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr 4408 + offsetof(struct vi_mqd_allocation, dynamic_cu_mask)); 4409 mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr 4410 + offsetof(struct vi_mqd_allocation, dynamic_cu_mask)); 4411 eop_base_addr = ring->eop_gpu_addr >> 8; 4412 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; 4413 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); 4414 4415 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 4416 tmp = RREG32(mmCP_HQD_EOP_CONTROL); 4417 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, 4418 (order_base_2(GFX8_MEC_HPD_SIZE / 4) - 1)); 4419 4420 mqd->cp_hqd_eop_control = tmp; 4421 4422 /* enable doorbell? */ 4423 tmp = REG_SET_FIELD(RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL), 4424 CP_HQD_PQ_DOORBELL_CONTROL, 4425 DOORBELL_EN, 4426 ring->use_doorbell ? 
1 : 0); 4427 4428 mqd->cp_hqd_pq_doorbell_control = tmp; 4429 4430 /* set the pointer to the MQD */ 4431 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc; 4432 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr); 4433 4434 /* set MQD vmid to 0 */ 4435 tmp = RREG32(mmCP_MQD_CONTROL); 4436 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); 4437 mqd->cp_mqd_control = tmp; 4438 4439 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 4440 hqd_gpu_addr = ring->gpu_addr >> 8; 4441 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; 4442 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); 4443 4444 /* set up the HQD, this is similar to CP_RB0_CNTL */ 4445 tmp = RREG32(mmCP_HQD_PQ_CONTROL); 4446 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, 4447 (order_base_2(ring->ring_size / 4) - 1)); 4448 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, 4449 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8)); 4450 #ifdef __BIG_ENDIAN 4451 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1); 4452 #endif 4453 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0); 4454 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0); 4455 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); 4456 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); 4457 mqd->cp_hqd_pq_control = tmp; 4458 4459 /* set the wb address whether it's enabled or not */ 4460 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); 4461 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; 4462 mqd->cp_hqd_pq_rptr_report_addr_hi = 4463 upper_32_bits(wb_gpu_addr) & 0xffff; 4464 4465 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 4466 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); 4467 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 4468 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 4469 4470 tmp = 0; 4471 /* enable the doorbell if requested */ 4472 if (ring->use_doorbell) { 4473 tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); 4474 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4475 DOORBELL_OFFSET, ring->doorbell_index); 4476 4477 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4478 DOORBELL_EN, 1); 4479 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4480 DOORBELL_SOURCE, 0); 4481 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 4482 DOORBELL_HIT, 0); 4483 } 4484 4485 mqd->cp_hqd_pq_doorbell_control = tmp; 4486 4487 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 4488 ring->wptr = 0; 4489 mqd->cp_hqd_pq_wptr = ring->wptr; 4490 mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR); 4491 4492 /* set the vmid for the queue */ 4493 mqd->cp_hqd_vmid = 0; 4494 4495 tmp = RREG32(mmCP_HQD_PERSISTENT_STATE); 4496 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); 4497 mqd->cp_hqd_persistent_state = tmp; 4498 4499 /* set MTYPE */ 4500 tmp = RREG32(mmCP_HQD_IB_CONTROL); 4501 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); 4502 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MTYPE, 3); 4503 mqd->cp_hqd_ib_control = tmp; 4504 4505 tmp = RREG32(mmCP_HQD_IQ_TIMER); 4506 tmp = REG_SET_FIELD(tmp, CP_HQD_IQ_TIMER, MTYPE, 3); 4507 mqd->cp_hqd_iq_timer = tmp; 4508 4509 tmp = RREG32(mmCP_HQD_CTX_SAVE_CONTROL); 4510 tmp = REG_SET_FIELD(tmp, CP_HQD_CTX_SAVE_CONTROL, MTYPE, 3); 4511 mqd->cp_hqd_ctx_save_control = tmp; 4512 4513 /* defaults */ 4514 mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR); 4515 mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR); 
	/* defaults */
	mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR);
	mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR);
	mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY);
	mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY);
	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
	mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO);
	mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI);
	mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET);
	mqd->cp_hqd_cntl_stack_size = RREG32(mmCP_HQD_CNTL_STACK_SIZE);
	mqd->cp_hqd_wg_state_offset = RREG32(mmCP_HQD_WG_STATE_OFFSET);
	mqd->cp_hqd_ctx_save_size = RREG32(mmCP_HQD_CTX_SAVE_SIZE);
	mqd->cp_hqd_eop_done_events = RREG32(mmCP_HQD_EOP_EVENTS);
	mqd->cp_hqd_error = RREG32(mmCP_HQD_ERROR);
	mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
	mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);

	/* activate the queue */
	mqd->cp_hqd_active = 1;

	return 0;
}

int gfx_v8_0_mqd_commit(struct amdgpu_device *adev,
			struct vi_mqd *mqd)
{
	uint32_t mqd_reg;
	uint32_t *mqd_data;

	/* HQD registers extend from mmCP_MQD_BASE_ADDR to mmCP_HQD_ERROR */
	mqd_data = &mqd->cp_mqd_base_addr_lo;

	/* disable wptr polling */
	WREG32_FIELD(CP_PQ_WPTR_POLL_CNTL, EN, 0);

	/* program all HQD registers */
	for (mqd_reg = mmCP_HQD_VMID; mqd_reg <= mmCP_HQD_EOP_CONTROL; mqd_reg++)
		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);

	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
	 * This is safe since EOP RPTR==WPTR for any inactive HQD
	 * on ASICs that do not support context-save.
	 * EOP writes/reads can start anywhere in the ring.
	 */
	if (adev->asic_type != CHIP_TONGA) {
		WREG32(mmCP_HQD_EOP_RPTR, mqd->cp_hqd_eop_rptr);
		WREG32(mmCP_HQD_EOP_WPTR, mqd->cp_hqd_eop_wptr);
		WREG32(mmCP_HQD_EOP_WPTR_MEM, mqd->cp_hqd_eop_wptr_mem);
	}

	for (mqd_reg = mmCP_HQD_EOP_EVENTS; mqd_reg <= mmCP_HQD_ERROR; mqd_reg++)
		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);

	/* activate the HQD */
	for (mqd_reg = mmCP_MQD_BASE_ADDR; mqd_reg <= mmCP_HQD_ACTIVE; mqd_reg++)
		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);

	return 0;
}

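/*
 * The KIQ is the only queue the driver programs directly through
 * gfx_v8_0_mqd_commit(), since it cannot map itself.  Regular compute
 * queues (KCQs) only get their MQD initialized here; they are mapped
 * onto hardware later by the KIQ via gfx_v8_0_kiq_kcq_enable().
 */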
static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct vi_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;

	gfx_v8_0_kiq_setting(ring);

	if (adev->in_gpu_reset) { /* for GPU_RESET case */
		/* reset MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));

		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);
		mutex_lock(&adev->srbm_mutex);
		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v8_0_mqd_commit(adev, mqd);
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else {
		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
		mutex_lock(&adev->srbm_mutex);
		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v8_0_mqd_init(ring);
		gfx_v8_0_mqd_commit(adev, mqd);
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
	}

	return 0;
}

static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct vi_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = ring - &adev->gfx.compute_ring[0];

	if (!adev->in_gpu_reset && !adev->in_suspend) {
		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
		mutex_lock(&adev->srbm_mutex);
		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v8_0_mqd_init(ring);
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
	} else if (adev->in_gpu_reset) { /* for GPU_RESET case */
		/* reset MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);
	} else {
		amdgpu_ring_clear_ring(ring);
	}
	return 0;
}

static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
{
	if (adev->asic_type > CHIP_TONGA) {
		WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, adev->doorbell_index.kiq << 2);
		WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, adev->doorbell_index.mec_ring7 << 2);
	}
	/* enable doorbells */
	WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
}

static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r;

	ring = &adev->gfx.kiq.ring;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
	if (unlikely(r != 0)) {
		/* drop the reservation taken above before bailing out */
		amdgpu_bo_unreserve(ring->mqd_obj);
		return r;
	}

	gfx_v8_0_kiq_init_queue(ring);
	amdgpu_bo_kunmap(ring->mqd_obj);
	ring->mqd_ptr = NULL;
	amdgpu_bo_unreserve(ring->mqd_obj);
	ring->sched.ready = true;
	return 0;
}

static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int r = 0, i;

	gfx_v8_0_cp_compute_enable(adev, true);

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			goto done;
		r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
		if (!r) {
			r = gfx_v8_0_kcq_init_queue(ring);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
		if (r)
			goto done;
	}

	gfx_v8_0_set_mec_doorbell_range(adev);

	r = gfx_v8_0_kiq_kcq_enable(adev);
	if (r)
		goto done;

done:
	return r;
}

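/*
 * A failing compute ring is tolerated below: amdgpu_ring_test_helper()
 * already records the outcome in ring->sched.ready, so a dead KCQ is
 * simply left unused rather than failing the whole resume.
 */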
static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *ring;

	/* collect all the ring_tests here, gfx, kiq, compute */
	ring = &adev->gfx.gfx_ring[0];
	r = amdgpu_ring_test_helper(ring);
	if (r)
		return r;

	ring = &adev->gfx.kiq.ring;
	r = amdgpu_ring_test_helper(ring);
	if (r)
		return r;

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		amdgpu_ring_test_helper(ring);
	}

	return 0;
}

static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
{
	int r;

	if (!(adev->flags & AMD_IS_APU))
		gfx_v8_0_enable_gui_idle_interrupt(adev, false);

	r = gfx_v8_0_kiq_resume(adev);
	if (r)
		return r;

	r = gfx_v8_0_cp_gfx_resume(adev);
	if (r)
		return r;

	r = gfx_v8_0_kcq_resume(adev);
	if (r)
		return r;

	r = gfx_v8_0_cp_test_all_rings(adev);
	if (r)
		return r;

	gfx_v8_0_enable_gui_idle_interrupt(adev, true);

	return 0;
}

static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
	gfx_v8_0_cp_gfx_enable(adev, enable);
	gfx_v8_0_cp_compute_enable(adev, enable);
}

static int gfx_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfx_v8_0_init_golden_registers(adev);
	gfx_v8_0_constants_init(adev);

	r = adev->gfx.rlc.funcs->resume(adev);
	if (r)
		return r;

	r = gfx_v8_0_cp_resume(adev);

	return r;
}

static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;

	r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
	if (r)
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
		amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
				  PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
				  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
				  PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
				  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
		amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
	r = amdgpu_ring_test_helper(kiq_ring);
	if (r)
		DRM_ERROR("KCQ disable failed\n");

	return r;
}

static bool gfx_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)
		|| RREG32(mmGRBM_STATUS2) != 0x8)
		return false;
	else
		return true;
}

static bool gfx_v8_0_rlc_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (RREG32(mmGRBM_STATUS2) != 0x8)
		return false;
	else
		return true;
}

static int gfx_v8_0_wait_for_rlc_idle(void *handle)
{
	unsigned int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (gfx_v8_0_rlc_is_idle(handle))
			return 0;

		udelay(1);
	}
	return -ETIMEDOUT;
}

static int gfx_v8_0_wait_for_idle(void *handle)
{
	unsigned int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (gfx_v8_0_is_idle(handle))
			return 0;

		udelay(1);
	}
	return -ETIMEDOUT;
}

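/*
 * Teardown order matters here: the KCQs are unmapped through the KIQ
 * while the CP is still running, and only then are the CP and RLC
 * halted (under RLC safe mode) once they report idle.
 */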
static int gfx_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);

	amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);

	amdgpu_irq_put(adev, &adev->gfx.sq_irq, 0);

	/* disable KCQ so the CPC stops touching memory that is no longer valid */
	gfx_v8_0_kcq_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		pr_debug("For SRIOV client, nothing more to do.\n");
		return 0;
	}
	amdgpu_gfx_rlc_enter_safe_mode(adev);
	if (!gfx_v8_0_wait_for_idle(adev))
		gfx_v8_0_cp_enable(adev, false);
	else
		pr_err("CP is busy, skipping CP halt\n");
	if (!gfx_v8_0_wait_for_rlc_idle(adev))
		adev->gfx.rlc.funcs->stop(adev);
	else
		pr_err("RLC is busy, skipping RLC halt\n");
	amdgpu_gfx_rlc_exit_safe_mode(adev);
	return 0;
}

static int gfx_v8_0_suspend(void *handle)
{
	return gfx_v8_0_hw_fini(handle);
}

static int gfx_v8_0_resume(void *handle)
{
	return gfx_v8_0_hw_init(handle);
}

static bool gfx_v8_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(mmGRBM_STATUS);
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK |
		   GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
	}

	/* GRBM_STATUS2 */
	tmp = RREG32(mmGRBM_STATUS2);
	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);

	if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) ||
	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) ||
	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
						SOFT_RESET_CPF, 1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
						SOFT_RESET_CPC, 1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
						SOFT_RESET_CPG, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
						SOFT_RESET_GRBM, 1);
	}

	/* SRBM_STATUS */
	tmp = RREG32(mmSRBM_STATUS);
	if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
	if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);

	if (grbm_soft_reset || srbm_soft_reset) {
		adev->gfx.grbm_soft_reset = grbm_soft_reset;
		adev->gfx.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->gfx.grbm_soft_reset = 0;
		adev->gfx.srbm_soft_reset = 0;
		return false;
	}
}

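/*
 * The soft reset handshake is split across several callbacks:
 * gfx_v8_0_check_soft_reset() above stores the required GRBM/SRBM
 * reset masks in adev->gfx, and gfx_v8_0_pre_soft_reset(),
 * gfx_v8_0_soft_reset() and gfx_v8_0_post_soft_reset() consume them
 * to quiesce, pulse and restart the affected blocks.
 */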
static int gfx_v8_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 grbm_soft_reset = 0;

	if ((!adev->gfx.grbm_soft_reset) &&
	    (!adev->gfx.srbm_soft_reset))
		return 0;

	grbm_soft_reset = adev->gfx.grbm_soft_reset;

	/* stop the rlc */
	adev->gfx.rlc.funcs->stop(adev);

	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
		/* Disable GFX parsing/prefetching */
		gfx_v8_0_cp_gfx_enable(adev, false);

	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
		int i;

		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

			mutex_lock(&adev->srbm_mutex);
			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
			gfx_v8_0_deactivate_hqd(adev, 2);
			vi_srbm_select(adev, 0, 0, 0, 0);
			mutex_unlock(&adev->srbm_mutex);
		}
		/* Disable MEC parsing/prefetching */
		gfx_v8_0_cp_compute_enable(adev, false);
	}

	return 0;
}

static int gfx_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if ((!adev->gfx.grbm_soft_reset) &&
	    (!adev->gfx.srbm_soft_reset))
		return 0;

	grbm_soft_reset = adev->gfx.grbm_soft_reset;
	srbm_soft_reset = adev->gfx.srbm_soft_reset;

	if (grbm_soft_reset || srbm_soft_reset) {
		tmp = RREG32(mmGMCON_DEBUG);
		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1);
		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1);
		WREG32(mmGMCON_DEBUG, tmp);
		udelay(50);
	}

	if (grbm_soft_reset) {
		tmp = RREG32(mmGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmGRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmGRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);
	}

	if (grbm_soft_reset || srbm_soft_reset) {
		tmp = RREG32(mmGMCON_DEBUG);
		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0);
		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0);
		WREG32(mmGMCON_DEBUG, tmp);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	return 0;
}

static int gfx_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 grbm_soft_reset = 0;

	if ((!adev->gfx.grbm_soft_reset) &&
	    (!adev->gfx.srbm_soft_reset))
		return 0;

	grbm_soft_reset = adev->gfx.grbm_soft_reset;

	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
		int i;

		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

			mutex_lock(&adev->srbm_mutex);
			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
			gfx_v8_0_deactivate_hqd(adev, 2);
			vi_srbm_select(adev, 0, 0, 0, 0);
			mutex_unlock(&adev->srbm_mutex);
		}
		gfx_v8_0_kiq_resume(adev);
		gfx_v8_0_kcq_resume(adev);
	}

	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
		gfx_v8_0_cp_gfx_resume(adev);

	gfx_v8_0_cp_test_all_rings(adev);

	adev->gfx.rlc.funcs->start(adev);

	return 0;
}

/**
 * gfx_v8_0_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches a GPU clock counter snapshot.
 * Returns the 64 bit clock counter snapshot.
 */
static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);
	return clock;
}

static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
					  uint32_t vmid,
					  uint32_t gds_base, uint32_t gds_size,
					  uint32_t gws_base, uint32_t gws_size,
					  uint32_t oa_base, uint32_t oa_size)
{
	/* GDS Base */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gds_base);

	/* GDS Size */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gds_size);

	/* GWS */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

	/* OA */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}

static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32(mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32(mmSQ_IND_DATA);
}

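/*
 * SQ wave state is read through the SQ_IND_INDEX/SQ_IND_DATA indirect
 * register pair: SQ_IND_INDEX selects (simd, wave, thread, regno) and
 * every read of SQ_IND_DATA then returns one dword, with AUTO_INCR
 * advancing the index.  As a sketch, dumping the first four SGPRs of
 * a wave would look like:
 *
 *	uint32_t sgprs[4];
 *
 *	wave_read_regs(adev, simd, wave, 0,
 *		       SQIND_WAVE_SGPRS_OFFSET, 4, sgprs);
 *
 * gfx_v8_0_read_wave_sgprs() below is the real wrapper around this.
 */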
static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32(mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32(mmSQ_IND_DATA);
}

static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 0 wave data */
	dst[(*no_fields)++] = 0;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}

static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, 0,
		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

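/*
 * These hooks are consumed by common amdgpu code, e.g. the debugfs
 * interfaces that dump per-wave state for user-space debugging tools.
 */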
static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v8_0_select_se_sh,
	.read_wave_data = &gfx_v8_0_read_wave_data,
	.read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
	.select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
};

static int gfx_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
	adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
	gfx_v8_0_set_ring_funcs(adev);
	gfx_v8_0_set_irq_funcs(adev);
	gfx_v8_0_set_gds_init(adev);
	gfx_v8_0_set_rlc_funcs(adev);

	return 0;
}

static int gfx_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	/* requires IBs so do in late init after IB pool is initialized */
	r = gfx_v8_0_do_edc_gpr_workarounds(adev);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
	if (r) {
		DRM_ERROR("amdgpu_irq_get() failed to get IRQ for EDC, r: %d.\n", r);
		return r;
	}

	r = amdgpu_irq_get(adev, &adev->gfx.sq_irq, 0);
	if (r) {
		DRM_ERROR("amdgpu_irq_get() failed to get IRQ for SQ, r: %d.\n", r);
		return r;
	}

	return 0;
}

static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
						       bool enable)
{
	if (((adev->asic_type == CHIP_POLARIS11) ||
	    (adev->asic_type == CHIP_POLARIS12) ||
	    (adev->asic_type == CHIP_VEGAM)) &&
	    adev->powerplay.pp_funcs->set_powergating_by_smu)
		/* Send msg to SMU via Powerplay */
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);

	WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
}

static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
							bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0);
}

static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
							bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0);
}

static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
					  bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0);
}

static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
						bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0);

	/* Read any GFX register to wake up GFX. */
	if (!enable)
		RREG32(mmDB_RENDER_CONTROL);
}

static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
					  bool enable)
{
	if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
		cz_enable_gfx_cg_power_gating(adev, true);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
			cz_enable_gfx_pipeline_power_gating(adev, true);
	} else {
		cz_enable_gfx_cg_power_gating(adev, false);
		cz_enable_gfx_pipeline_power_gating(adev, false);
	}
}

static int gfx_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_RLC_SMU_HS |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_GFX_DMG))
		amdgpu_gfx_rlc_enter_safe_mode(adev);
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:

		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
			cz_enable_sck_slow_down_on_power_up(adev, true);
			cz_enable_sck_slow_down_on_power_down(adev, true);
		} else {
			cz_enable_sck_slow_down_on_power_up(adev, false);
			cz_enable_sck_slow_down_on_power_down(adev, false);
		}
		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
			cz_enable_cp_power_gating(adev, true);
		else
			cz_enable_cp_power_gating(adev, false);

		cz_update_gfx_cg_power_gating(adev, enable);

		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
		else
			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);

		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
		else
			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
		else
			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);

		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
		else
			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);

		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable)
			polaris11_enable_gfx_quick_mg_power_gating(adev, true);
		else
			polaris11_enable_gfx_quick_mg_power_gating(adev, false);
		break;
	default:
		break;
	}
	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_RLC_SMU_HS |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_GFX_DMG))
		amdgpu_gfx_rlc_exit_safe_mode(adev);
	return 0;
}

static void gfx_v8_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_GFX_MGCG */
	data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__CPF_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_MGCG;

	/* AMD_CG_SUPPORT_GFX_CGCG */
	data = RREG32(mmRLC_CGCG_CGLS_CTRL);
	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGCG;

	/* AMD_CG_SUPPORT_GFX_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGLS;

	/* AMD_CG_SUPPORT_GFX_CGTS */
	data = RREG32(mmCGTS_SM_CTRL_REG);
	if (!(data & CGTS_SM_CTRL_REG__OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_CGTS;

	/* AMD_CG_SUPPORT_GFX_CGTS_LS */
	if (!(data & CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_CGTS_LS;

	/* AMD_CG_SUPPORT_GFX_RLC_LS */
	data = RREG32(mmRLC_MEM_SLP_CNTL);
	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;

	/* AMD_CG_SUPPORT_GFX_CP_LS */
	data = RREG32(mmCP_MEM_SLP_CNTL);
	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
}

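/*
 * BPM registers are not directly memory mapped: they are programmed
 * through the RLC serdes.  gfx_v8_0_send_serdes_cmd() therefore
 * broadcasts to all SEs/SHs (the 0xffffffff selects), enables every
 * CU and non-CU master, and encodes the command and BPM register
 * address into RLC_SERDES_WR_CTRL; see SET_BPM_SERDES_CMD /
 * CLE_BPM_SERDES_CMD and the BPM_REG_* enum near the top of the file.
 */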
static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
				     uint32_t reg_addr, uint32_t cmd)
{
	uint32_t data;

	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);

	WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
	WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);

	data = RREG32(mmRLC_SERDES_WR_CTRL);
	if (adev->asic_type == CHIP_STONEY)
		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
			  RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
			  RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
			  RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
			  RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
			  RLC_SERDES_WR_CTRL__POWER_UP_MASK |
			  RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
			  RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
	else
		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
			  RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
			  RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
			  RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
			  RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
			  RLC_SERDES_WR_CTRL__POWER_UP_MASK |
			  RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
			  RLC_SERDES_WR_CTRL__BPM_DATA_MASK |
			  RLC_SERDES_WR_CTRL__REG_ADDR_MASK |
			  RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
	data |= (RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK |
		 (cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) |
		 (reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) |
		 (0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT));

	WREG32(mmRLC_SERDES_WR_CTRL, data);
}

#define MSG_ENTER_RLC_SAFE_MODE     1
#define MSG_EXIT_RLC_SAFE_MODE      0
#define RLC_GPR_REG2__REQ_MASK 0x00000001
#define RLC_GPR_REG2__REQ__SHIFT 0
#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e

static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
{
	uint32_t rlc_setting;

	rlc_setting = RREG32(mmRLC_CNTL);
	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return false;

	return true;
}

static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev)
{
	uint32_t data;
	unsigned i;

	data = RREG32(mmRLC_CNTL);
	data |= RLC_SAFE_MODE__CMD_MASK;
	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
	WREG32(mmRLC_SAFE_MODE, data);

	/* wait for RLC_SAFE_MODE */
	for (i = 0; i < adev->usec_timeout; i++) {
		if ((RREG32(mmRLC_GPM_STAT) &
		     (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
		      RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
		    (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
		     RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
			break;
		udelay(1);
	}
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}

static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
{
	uint32_t data;
	unsigned i;

	data = RREG32(mmRLC_CNTL);
	data |= RLC_SAFE_MODE__CMD_MASK;
	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
	WREG32(mmRLC_SAFE_MODE, data);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}

static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
	.is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
	.set_safe_mode = gfx_v8_0_set_safe_mode,
	.unset_safe_mode = gfx_v8_0_unset_safe_mode,
	.init = gfx_v8_0_rlc_init,
	.get_csb_size = gfx_v8_0_get_csb_size,
	.get_csb_buffer = gfx_v8_0_get_csb_buffer,
	.get_cp_table_num = gfx_v8_0_cp_jump_table_num,
	.resume = gfx_v8_0_rlc_resume,
	.stop = gfx_v8_0_rlc_stop,
	.reset = gfx_v8_0_rlc_reset,
	.start = gfx_v8_0_rlc_start
};

static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t temp, data;

	amdgpu_gfx_rlc_enter_safe_mode(adev);

	/* It is disabled by HW by default */
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS)
				/* 1 - RLC memory Light sleep */
				WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1);

			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS)
				WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1);
		}

		/* 3 - RLC_CGTT_MGCG_OVERRIDE */
		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		if (adev->flags & AMD_IS_APU)
			data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK);
		else
			data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);

		if (temp != data)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* 4 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* 5 - clear mgcg override */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
			/* 6 - Enable CGTS(Tree Shade) MGCG/MGLS */
			temp = data = RREG32(mmCGTS_SM_CTRL_REG);
			data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK);
			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
			data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
			if (temp != data)
				WREG32(mmCGTS_SM_CTRL_REG, data);
		}
		udelay(50);

		/* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);
	} else {
		/* 1 - MGCG_OVERRIDE[0] for CP and MGCG_OVERRIDE[1] for RLC */
		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
			 RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
		if (temp != data)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* 2 - disable MGLS in RLC */
		data = RREG32(mmRLC_MEM_SLP_CNTL);
		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
			WREG32(mmRLC_MEM_SLP_CNTL, data);
		}

		/* 3 - disable MGLS in CP */
		data = RREG32(mmCP_MEM_SLP_CNTL);
		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
			WREG32(mmCP_MEM_SLP_CNTL, data);
		}

		/* 4 - Disable CGTS(Tree Shade) MGCG and MGLS */
		temp = data = RREG32(mmCGTS_SM_CTRL_REG);
		data |= (CGTS_SM_CTRL_REG__OVERRIDE_MASK |
			 CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK);
		if (temp != data)
			WREG32(mmCGTS_SM_CTRL_REG, data);

		/* 5 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* 6 - set mgcg override */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD);

		udelay(50);

		/* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);
	}

	amdgpu_gfx_rlc_exit_safe_mode(adev);
}

static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t temp, temp1, data, data1;

	temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);

	amdgpu_gfx_rlc_enter_safe_mode(adev);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
		temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
		if (temp1 != data1)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);

		/* 1 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* 2 - clear cgcg override */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);

		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* 3 - write cmd to set CGLS */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);

		/* 4 - enable cgcg */
		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
			/* enable cgls */
			data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;

			temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
			data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK;

			if (temp1 != data1)
				WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
		} else {
			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		}

		if (temp != data)
			WREG32(mmRLC_CGCG_CGLS_CTRL, data);

		/* 5 - enable cntx_empty_int_enable/cntx_busy_int_enable/
		 * Cmp_busy/GFX_Idle interrupts
		 */
		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
	} else {
		/* disable cntx_empty_int_enable & GFX Idle interrupt */
		gfx_v8_0_enable_gui_idle_interrupt(adev, false);

		/* TEST CGCG */
		temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data1 |= (RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK |
			  RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK);
		if (temp1 != data1)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);

		/* read gfx register to wake up cgcg */
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);

		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* write cmd to set CGCG override */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);

		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* write cmd to clear CGLS */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD);

		/* disable cgcg, cgls should be disabled too. */
		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
			  RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
		if (temp != data)
			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
		/* enable interrupts again for PG */
		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
	}

	gfx_v8_0_wait_for_rlc_serdes(adev);

	amdgpu_gfx_rlc_exit_safe_mode(adev);
}

static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
					    bool enable)
{
	if (enable) {
		/* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
		 * === MGCG + MGLS + TS(CG/LS) ===
		 */
		gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
		gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
	} else {
		/* CGCG/CGLS should be disabled before MGCG/MGLS/TS(CG/LS)
		 * === CGCG + CGLS ===
		 */
		gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
		gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
	}
	return 0;
}

static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;

	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				PP_BLOCK_GFX_CG,
				pp_support_state,
				pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				PP_BLOCK_GFX_MG,
				pp_support_state,
				pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	return 0;
}

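/*
 * PP_CG_MSG_ID() packs (group, block, support, state) into the single
 * msg_id dword handed to the SMU.  As a sketch, requesting both CG and
 * LS gating on the GFX CG block looks like:
 *
 *	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, PP_BLOCK_GFX_CG,
 *			      PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
 *			      PP_STATE_CG | PP_STATE_LS);
 *
 * which is the pattern repeated by the per-block ifs below.
 */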
static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
						    enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;

	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				PP_BLOCK_GFX_CG,
				pp_support_state,
				pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				PP_BLOCK_GFX_3D,
				pp_support_state,
				pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				PP_BLOCK_GFX_MG,
				pp_support_state,
				pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
		pp_support_state = PP_STATE_SUPPORT_LS;

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				PP_BLOCK_GFX_RLC,
				pp_support_state,
				pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
		pp_support_state = PP_STATE_SUPPORT_LS;

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;
		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
				PP_BLOCK_GFX_CP,
				pp_support_state,
				pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	return 0;
}

static int gfx_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		gfx_v8_0_update_gfx_clock_gating(adev,
						 state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
		gfx_v8_0_tonga_update_gfx_clock_gating(adev, state);
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
		break;
	default:
		break;
	}
	return 0;
}

static u64 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs];
}

static u64 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		/* XXX check if swapping is necessary on BE */
		return ring->adev->wb.wb[ring->wptr_offs];
	else
		return RREG32(mmCP_RB0_WPTR);
}

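/*
 * The write pointer is published in one of two ways: rings that own a
 * doorbell mirror the wptr into the writeback page and then ring the
 * doorbell, while the legacy path writes CP_RB0_WPTR through MMIO and
 * reads it back to make sure the posted write has landed before the
 * CP can fetch.
 */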
static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
		(void)RREG32(mmCP_RB0_WPTR);
	}
}

static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask, reg_mem_engine;

	if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
	    (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
		switch (ring->me) {
		case 1:
			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
			break;
		case 2:
			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
			break;
		default:
			return;
		}
		reg_mem_engine = 0;
	} else {
		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
		reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
				 reg_mem_engine));
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
	amdgpu_ring_write(ring, ref_and_mask);
	amdgpu_ring_write(ring, ref_and_mask);
	amdgpu_ring_write(ring, 0x20); /* poll interval */
}

static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
	amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
		EVENT_INDEX(4));

	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
		EVENT_INDEX(0));
}

static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 header, control = 0;

	if (ib->flags & AMDGPU_IB_FLAG_CE)
		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	else
		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	control |= ib->length_dw | (vmid << 24);

	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
		control |= INDIRECT_BUFFER_PRE_ENB(1);

		if (!(ib->flags & AMDGPU_IB_FLAG_CE))
			gfx_v8_0_ring_emit_de_meta(ring);
	}

	amdgpu_ring_write(ring, header);
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	amdgpu_ring_write(ring, control);
}

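/*
 * gfx_v8_0_ring_emit_ib_gfx() above and the compute variant below both
 * build the same 4-dword INDIRECT_BUFFER packet: header, IB address
 * bits 31:2 (the low two bits must be zero, the IB is dword aligned),
 * the upper 16 address bits, and a control word carrying the IB size
 * in dwords plus the VMID in bits 31:24.
 */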
static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
					  struct amdgpu_job *job,
					  struct amdgpu_ib *ib,
					  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

	/* Currently, there is a high possibility to get wave ID mismatch
	 * between ME and GDS, leading to a hw deadlock, because ME generates
	 * different wave IDs than the GDS expects. This situation happens
	 * randomly when at least 5 compute pipes use GDS ordered append.
	 * The wave IDs generated by ME are also wrong after suspend/resume.
	 * Those are probably bugs somewhere else in the kernel driver.
	 *
	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
	 * GDS to 0 for this ring (me/pipe).
	 */
	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	amdgpu_ring_write(ring, control);
}

static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

	/* EVENT_WRITE_EOP - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EOP_TC_WB_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
			  DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
}

static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
				 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, 0xffffffff);
	amdgpu_ring_write(ring, 4); /* poll interval */
}

static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for the invalidate to complete */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
				 WAIT_REG_MEM_ENGINE(0))); /* me */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* ref */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, 0x20); /* poll interval */

	/* compute doesn't have PFP */
	if (usepfp) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}
}

static u64 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->wptr_offs];
}

static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* XXX check if swapping is necessary on BE */
	adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
}

static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
					   bool acquire)
{
	struct amdgpu_device *adev = ring->adev;
	int pipe_num, tmp, reg;
	int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;

	pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;

	/* first me only has 2 entries, GFX and HP3D */
	if (ring->me > 0)
		pipe_num -= 2;

	reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num;
	tmp = RREG32(reg);
	tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
	WREG32(reg, tmp);
}

static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev,
					    struct amdgpu_ring *ring,
					    bool acquire)
{
	int i, pipe;
	bool reserve;
	struct amdgpu_ring *iring;

	mutex_lock(&adev->gfx.pipe_reserve_mutex);
	pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
	if (acquire)
		set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
	else
		clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);

	if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
		/* Clear all reservations - everyone reacquires all resources */
		for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
			gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
						       true);

		for (i = 0; i < adev->gfx.num_compute_rings; ++i)
			gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
						       true);
	} else {
		/* Lower all pipes without a current reservation */
		for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
			iring = &adev->gfx.gfx_ring[i];
			pipe = amdgpu_gfx_mec_queue_to_bit(adev,
							   iring->me,
							   iring->pipe,
							   0);
			reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
			gfx_v8_0_ring_set_pipe_percent(iring, reserve);
		}

		for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
			iring = &adev->gfx.compute_ring[i];
			pipe = amdgpu_gfx_mec_queue_to_bit(adev,
							   iring->me,
							   iring->pipe,
							   0);
			reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
			gfx_v8_0_ring_set_pipe_percent(iring, reserve);
		}
	}

	mutex_unlock(&adev->gfx.pipe_reserve_mutex);
}

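/*
 * Raising a compute ring to high priority is a two step operation:
 * bump the HQD pipe/queue priority for the ring itself (below), then
 * throttle every pipe without a reservation through
 * SPI_WCL_PIPE_PERCENT so the prioritized work sees less contention.
 */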
	uint32_t queue_priority = acquire ? 0xf : 0x0;

	mutex_lock(&adev->srbm_mutex);
	vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);

	WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority);
	WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority);

	vi_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
					       enum drm_sched_priority priority)
{
	struct amdgpu_device *adev = ring->adev;
	bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;

	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
		return;

	gfx_v8_0_hqd_set_priority(adev, ring, acquire);
	gfx_v8_0_pipe_reserve_resources(adev, ring, acquire);
}

static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
					     u64 addr, u64 seq,
					     unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EOP_TC_WB_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
}

static void gfx_v8_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned int flags)
{
	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, mmCPC_INT_STATUS);
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}

static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	amdgpu_ring_write(ring, 0);
}

static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
	uint32_t dw2 = 0;

	if (amdgpu_sriov_vf(ring->adev))
		gfx_v8_0_ring_emit_ce_meta(ring);

	dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		gfx_v8_0_ring_emit_vgt_flush(ring);
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;

		/* set load_ce_ram if preamble presented */
		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
			dw2 |= 0x10000000;
	} else {
		/* still load_ce_ram if this is the first time the preamble is
		 * presented, even though no context switch happens.
		 */
		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
			dw2 |= 0x10000000;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}

static unsigned gfx_v8_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
	ret = ring->wptr & ring->buf_mask;
	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
	return ret;
}

static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
	unsigned cur;

	BUG_ON(offset > ring->buf_mask);
	BUG_ON(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr & ring->buf_mask) - 1;
	if (likely(cur > offset))
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
}

static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register */
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
}

static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				    uint32_t val)
{
	uint32_t cmd;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
		break;
	case AMDGPU_RING_TYPE_KIQ:
		cmd = 1 << 16; /* no inc addr */
		break;
	default:
		cmd = WR_CONFIRM;
		break;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t value = 0;

	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
	WREG32(mmSQ_CMD, value);
}

static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
						 enum amdgpu_interrupt_state state)
{
	WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE,
		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
}

static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int me, int pipe,
						     enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */

	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
			break;
		case 1:
			mec_int_cntl_reg = mmCP_ME1_PIPE1_INT_CNTL;
			break;
		case 2:
			mec_int_cntl_reg = mmCP_ME1_PIPE2_INT_CNTL;
			break;
		case 3:
			mec_int_cntl_reg = mmCP_ME1_PIPE3_INT_CNTL;
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}

static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);

	return 0;
}

static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE,
		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);

	return 0;
}

static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
		gfx_v8_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}
	return 0;
}

static int gfx_v8_0_set_cp_ecc_int_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 unsigned int type,
					 enum amdgpu_interrupt_state state)
{
	int enable_flag;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		enable_flag = 0;
		break;

	case AMDGPU_IRQ_STATE_ENABLE:
		enable_flag = 1;
		break;

	default:
		return -EINVAL;
	}

	WREG32_FIELD(CP_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
	WREG32_FIELD(CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, enable_flag);
	WREG32_FIELD(CP_INT_CNTL_RING1, CP_ECC_ERROR_INT_ENABLE, enable_flag);
	WREG32_FIELD(CP_INT_CNTL_RING2, CP_ECC_ERROR_INT_ENABLE, enable_flag);
	WREG32_FIELD(CPC_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
	WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
		     enable_flag);
	WREG32_FIELD(CP_ME1_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
		     enable_flag);
	WREG32_FIELD(CP_ME1_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
		     enable_flag);
	WREG32_FIELD(CP_ME1_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
		     enable_flag);
	WREG32_FIELD(CP_ME2_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
		     enable_flag);
	WREG32_FIELD(CP_ME2_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
		     enable_flag);
	WREG32_FIELD(CP_ME2_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
		     enable_flag);
	WREG32_FIELD(CP_ME2_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
		     enable_flag);

	return 0;
}

static int gfx_v8_0_set_sq_int_state(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     unsigned int type,
				     enum amdgpu_interrupt_state state)
{
	int enable_flag;

	/* the STALL field gates SQ interrupt messages, so the sense is
	 * inverted here: stall (1) to disable, unstall (0) to enable
	 */
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		enable_flag = 1;
		break;

	case AMDGPU_IRQ_STATE_ENABLE:
		enable_flag = 0;
		break;

	default:
		return -EINVAL;
	}

	WREG32_FIELD(SQ_INTERRUPT_MSG_CTRL, STALL,
		     enable_flag);

	return 0;
}

static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			/* Per-queue interrupt is supported for MEC starting
			 * from VI.  The interrupt can only be enabled/disabled
			 * per pipe instead of per queue.
			 */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}

static void gfx_v8_0_fault(struct amdgpu_device *adev,
			   struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	int i;

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	}
}

static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v8_0_fault(adev, entry);
	return 0;
}

static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v8_0_fault(adev, entry);
	return 0;
}

static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("CP EDC/ECC error detected.\n");
	return 0;
}

static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data)
{
	u32 enc, se_id, sh_id, cu_id;
	char type[20];
	int sq_edc_source = -1;

	enc = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, ENCODING);
	se_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, SE_ID);

	switch (enc) {
	case 0:
		DRM_INFO("SQ general purpose intr detected: "
			 "se_id %d, immed_overflow %d, host_reg_overflow %d, "
			 "host_cmd_overflow %d, cmd_timestamp %d, "
			 "reg_timestamp %d, thread_trace_buff_full %d, "
			 "wlt %d, thread_trace %d.\n",
			 se_id,
			 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, IMMED_OVERFLOW),
			 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_REG_OVERFLOW),
			 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_CMD_OVERFLOW),
			 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, CMD_TIMESTAMP),
			 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, REG_TIMESTAMP),
			 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE_BUF_FULL),
			 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, WLT),
			 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE)
			 );
		break;
	case 1:
	case 2:

		cu_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, CU_ID);
		sh_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SH_ID);

		/*
		 * This function can be called either directly from the ISR or
		 * from the BH work item; only in the latter case (process
		 * context) is it safe to read the SQ_EDC_INFO instance.
		 */
		if (in_task()) {
			mutex_lock(&adev->grbm_idx_mutex);
			gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id);

			sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE);

			gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
			mutex_unlock(&adev->grbm_idx_mutex);
		}

		if (enc == 1)
			sprintf(type, "instruction intr");
		else
sprintf(type, "EDC/ECC error"); 6777 6778 DRM_INFO( 6779 "SQ %s detected: " 6780 "se_id %d, sh_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d " 6781 "trap %s, sq_ed_info.source %s.\n", 6782 type, se_id, sh_id, cu_id, 6783 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SIMD_ID), 6784 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, WAVE_ID), 6785 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, VM_ID), 6786 REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, PRIV) ? "true" : "false", 6787 (sq_edc_source != -1) ? sq_edc_source_names[sq_edc_source] : "unavailable" 6788 ); 6789 break; 6790 default: 6791 DRM_ERROR("SQ invalid encoding type\n."); 6792 } 6793 } 6794 6795 static void gfx_v8_0_sq_irq_work_func(struct work_struct *work) 6796 { 6797 6798 struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work); 6799 struct sq_work *sq_work = container_of(work, struct sq_work, work); 6800 6801 gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data); 6802 } 6803 6804 static int gfx_v8_0_sq_irq(struct amdgpu_device *adev, 6805 struct amdgpu_irq_src *source, 6806 struct amdgpu_iv_entry *entry) 6807 { 6808 unsigned ih_data = entry->src_data[0]; 6809 6810 /* 6811 * Try to submit work so SQ_EDC_INFO can be accessed from 6812 * BH. If previous work submission hasn't finished yet 6813 * just print whatever info is possible directly from the ISR. 6814 */ 6815 if (work_pending(&adev->gfx.sq_work.work)) { 6816 gfx_v8_0_parse_sq_irq(adev, ih_data); 6817 } else { 6818 adev->gfx.sq_work.ih_data = ih_data; 6819 schedule_work(&adev->gfx.sq_work.work); 6820 } 6821 6822 return 0; 6823 } 6824 6825 static const struct amd_ip_funcs gfx_v8_0_ip_funcs = { 6826 .name = "gfx_v8_0", 6827 .early_init = gfx_v8_0_early_init, 6828 .late_init = gfx_v8_0_late_init, 6829 .sw_init = gfx_v8_0_sw_init, 6830 .sw_fini = gfx_v8_0_sw_fini, 6831 .hw_init = gfx_v8_0_hw_init, 6832 .hw_fini = gfx_v8_0_hw_fini, 6833 .suspend = gfx_v8_0_suspend, 6834 .resume = gfx_v8_0_resume, 6835 .is_idle = gfx_v8_0_is_idle, 6836 .wait_for_idle = gfx_v8_0_wait_for_idle, 6837 .check_soft_reset = gfx_v8_0_check_soft_reset, 6838 .pre_soft_reset = gfx_v8_0_pre_soft_reset, 6839 .soft_reset = gfx_v8_0_soft_reset, 6840 .post_soft_reset = gfx_v8_0_post_soft_reset, 6841 .set_clockgating_state = gfx_v8_0_set_clockgating_state, 6842 .set_powergating_state = gfx_v8_0_set_powergating_state, 6843 .get_clockgating_state = gfx_v8_0_get_clockgating_state, 6844 }; 6845 6846 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { 6847 .type = AMDGPU_RING_TYPE_GFX, 6848 .align_mask = 0xff, 6849 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 6850 .support_64bit_ptrs = false, 6851 .get_rptr = gfx_v8_0_ring_get_rptr, 6852 .get_wptr = gfx_v8_0_ring_get_wptr_gfx, 6853 .set_wptr = gfx_v8_0_ring_set_wptr_gfx, 6854 .emit_frame_size = /* maximum 215dw if count 16 IBs in */ 6855 5 + /* COND_EXEC */ 6856 7 + /* PIPELINE_SYNC */ 6857 VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */ 6858 8 + /* FENCE for VM_FLUSH */ 6859 20 + /* GDS switch */ 6860 4 + /* double SWITCH_BUFFER, 6861 the first COND_EXEC jump to the place just 6862 prior to this double SWITCH_BUFFER */ 6863 5 + /* COND_EXEC */ 6864 7 + /* HDP_flush */ 6865 4 + /* VGT_flush */ 6866 14 + /* CE_META */ 6867 31 + /* DE_META */ 6868 3 + /* CNTX_CTRL */ 6869 5 + /* HDP_INVL */ 6870 8 + 8 + /* FENCE x2 */ 6871 2, /* SWITCH_BUFFER */ 6872 .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */ 6873 .emit_ib = gfx_v8_0_ring_emit_ib_gfx, 6874 .emit_fence = gfx_v8_0_ring_emit_fence_gfx, 6875 .emit_pipeline_sync = 
	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
	.test_ring = gfx_v8_0_ring_test_ring,
	.test_ib = gfx_v8_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v8_ring_emit_sb,
	.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v8_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec,
	.emit_wreg = gfx_v8_0_ring_emit_wreg,
	.soft_recovery = gfx_v8_0_ring_soft_recovery,
};

static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = false,
	.get_rptr = gfx_v8_0_ring_get_rptr,
	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v8_0_ring_emit_gds_switch */
		7 + /* gfx_v8_0_ring_emit_hdp_flush */
		5 + /* hdp_invalidate */
		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
		7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
	.emit_fence = gfx_v8_0_ring_emit_fence_compute,
	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
	.test_ring = gfx_v8_0_ring_test_ring,
	.test_ib = gfx_v8_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.set_priority = gfx_v8_0_ring_set_priority_compute,
	.emit_wreg = gfx_v8_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = false,
	.get_rptr = gfx_v8_0_ring_get_rptr,
	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v8_0_ring_emit_gds_switch */
		7 + /* gfx_v8_0_ring_emit_hdp_flush */
		5 + /* hdp_invalidate */
		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
		17 + /* gfx_v8_0_ring_emit_vm_flush */
		7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
	.emit_fence = gfx_v8_0_ring_emit_fence_kiq,
	.test_ring = gfx_v8_0_ring_test_ring,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v8_0_ring_emit_rreg,
	.emit_wreg = gfx_v8_0_ring_emit_wreg,
};

static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq.ring.funcs = &gfx_v8_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v8_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v8_0_eop_irq_funcs = {
	.set = gfx_v8_0_set_eop_interrupt_state,
	.process = gfx_v8_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_reg_irq_funcs = {
	.set = gfx_v8_0_set_priv_reg_fault_state,
	.process = gfx_v8_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = {
	.set = gfx_v8_0_set_priv_inst_fault_state,
	.process = gfx_v8_0_priv_inst_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = {
	.set = gfx_v8_0_set_cp_ecc_int_state,
	.process = gfx_v8_0_cp_ecc_error_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v8_0_sq_irq_funcs = {
	.set = gfx_v8_0_set_sq_int_state,
	.process = gfx_v8_0_sq_irq,
};

static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v8_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v8_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;

	adev->gfx.cp_ecc_error_irq.num_types = 1;
	adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;

	adev->gfx.sq_irq.num_types = 1;
	adev->gfx.sq_irq.funcs = &gfx_v8_0_sq_irq_funcs;
}

static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs = &iceland_rlc_funcs;
}

static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
	adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
}

static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						 u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
		RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
}

static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
	unsigned disable_masks[4 * 2];
	u32 ao_cu_num;

	memset(cu_info, 0, sizeof(*cu_info));

	if (adev->flags & AMD_IS_APU)
		ao_cu_num = 2;
	else
		ao_cu_num = adev->gfx.config.max_cu_per_sh;

	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
			if (i < 4 && j < 2)
				gfx_v8_0_set_user_cu_inactive_bitmap(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
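			/* cache the active-CU mask for this SE/SH, then count
			 * the active CUs and flag up to ao_cu_num of them as
			 * always-on
			 */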
			cu_info->bitmap[i][j] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < ao_cu_num)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			if (i < 2 && j < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
		}
	}
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
	cu_info->max_waves_per_simd = 10;
	cu_info->max_scratch_slots_per_cu = 32;
	cu_info->wave_front_size = 64;
	cu_info->lds_size = 64;
}

const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gfx_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gfx_v8_0_ip_funcs,
};

static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
	uint64_t ce_payload_addr;
	int cnt_ce;
	union {
		struct vi_ce_ib_state regular;
		struct vi_ce_ib_state_chained_ib chained;
	} ce_payload = {};

	if (ring->adev->virt.chained_ib_support) {
		ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
			offsetof(struct vi_gfx_meta_data_chained_ib, ce_payload);
		cnt_ce = (sizeof(ce_payload.chained) >> 2) + 4 - 2;
	} else {
		ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
			offsetof(struct vi_gfx_meta_data, ce_payload);
		cnt_ce = (sizeof(ce_payload.regular) >> 2) + 4 - 2;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_ce));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(ce_payload_addr));
	amdgpu_ring_write(ring, upper_32_bits(ce_payload_addr));
	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, cnt_ce - 2);
}

static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
	uint64_t de_payload_addr, gds_addr, csa_addr;
	int cnt_de;
	union {
		struct vi_de_ib_state regular;
		struct vi_de_ib_state_chained_ib chained;
	} de_payload = {};

	csa_addr = amdgpu_csa_vaddr(ring->adev);
	gds_addr = csa_addr + 4096;
	if (ring->adev->virt.chained_ib_support) {
		de_payload.chained.gds_backup_addrlo = lower_32_bits(gds_addr);
		de_payload.chained.gds_backup_addrhi = upper_32_bits(gds_addr);
		de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data_chained_ib, de_payload);
		cnt_de = (sizeof(de_payload.chained) >> 2) + 4 - 2;
	} else {
		de_payload.regular.gds_backup_addrlo = lower_32_bits(gds_addr);
		de_payload.regular.gds_backup_addrhi = upper_32_bits(gds_addr);
		de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data, de_payload);
		cnt_de = (sizeof(de_payload.regular) >> 2) + 4 - 2;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_de));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(de_payload_addr));
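	/* hi dword of the payload address, then the DE payload itself */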
	amdgpu_ring_write(ring, upper_32_bits(de_payload_addr));
	amdgpu_ring_write_multiple(ring, (void *)&de_payload, cnt_de - 2);
}