/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include "smumgr.h"
#include "smu10_inc.h"
#include "soc15_common.h"
#include "smu10_smumgr.h"
#include "ppatomctrl.h"
#include "rv_ppsmc.h"
#include "smu10_driver_if.h"
#include "smu10.h"
#include "pp_debug.h"


#define BUFFER_SIZE			80000
#define MAX_STRING_SIZE			15
#define BUFFER_SIZETWO			131072

#define MP0_Public			0x03800000
#define MP0_SRAM			0x03900000
#define MP1_Public			0x03b00000
#define MP1_SRAM			0x03c00004

#define smnMP1_FIRMWARE_FLAGS		0x3010028


static uint32_t smu10_wait_for_response(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t reg;

	reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);

	phm_wait_for_register_unequal(hwmgr, reg,
			0, MP1_C2PMSG_90__CONTENT_MASK);

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
}

static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
		uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);

	return 0;
}

static uint32_t smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}

static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;

	smu10_wait_for_response(hwmgr);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	smu10_send_msg_to_smc_without_waiting(hwmgr, msg);

	if (smu10_wait_for_response(hwmgr) == 0)
		pr_err("Failed to send Message %x.\n", msg);

	return 0;
}


static int smu10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
		uint16_t msg, uint32_t parameter)
{
	struct amdgpu_device *adev = hwmgr->adev;

	smu10_wait_for_response(hwmgr);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);

	smu10_send_msg_to_smc_without_waiting(hwmgr, msg);

	if (smu10_wait_for_response(hwmgr) == 0)
		pr_err("Failed to send Message %x.\n", msg);

	return 0;
}

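/*
 * Table transfer helpers below: the driver hands the SMU the GPU address
 * of a table buffer via PPSMC_MSG_SetDriverDramAddrHigh/Low and then asks
 * the firmware to copy the table between SMU-internal memory and DRAM
 * (PPSMC_MSG_TransferTableSmu2Dram / PPSMC_MSG_TransferTableDram2Smu).
 * The HDP cache is flushed around each transfer so the CPU copy and the
 * GPU view of the buffer stay coherent.
 */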
static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
		uint8_t *table, int16_t table_id)
{
	struct smu10_smumgr *priv =
			(struct smu10_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;

	PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
			"Invalid SMU Table ID!", return -EINVAL;);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
			"Invalid SMU Table version!", return -EINVAL;);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
			"Invalid SMU Table Length!", return -EINVAL;);
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableSmu2Dram,
			priv->smu_tables.entry[table_id].table_id,
			NULL);

	/* flush hdp cache */
	amdgpu_asic_flush_hdp(adev, NULL);

	memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
			priv->smu_tables.entry[table_id].size);

	return 0;
}

static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
		uint8_t *table, int16_t table_id)
{
	struct smu10_smumgr *priv =
			(struct smu10_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;

	PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
			"Invalid SMU Table ID!", return -EINVAL;);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
			"Invalid SMU Table version!", return -EINVAL;);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
			"Invalid SMU Table Length!", return -EINVAL;);

	memcpy(priv->smu_tables.entry[table_id].table, table,
			priv->smu_tables.entry[table_id].size);

	amdgpu_asic_flush_hdp(adev, NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableDram2Smu,
			priv->smu_tables.entry[table_id].table_id,
			NULL);

	return 0;
}

static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr)
{
	uint32_t smc_driver_if_version;

	smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_GetDriverIfVersion,
			&smc_driver_if_version);

	if ((smc_driver_if_version != SMU10_DRIVER_IF_VERSION) &&
	    (smc_driver_if_version != SMU10_DRIVER_IF_VERSION + 1)) {
		pr_err("Attempt to read SMC IF Version Number Failed!\n");
		return -EINVAL;
	}

	return 0;
}

static int smu10_smu_fini(struct pp_hwmgr *hwmgr)
{
	struct smu10_smumgr *priv =
			(struct smu10_smumgr *)(hwmgr->smu_backend);

	if (priv) {
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle,
				&priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
				&priv->smu_tables.entry[SMU10_WMTABLE].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_CLOCKTABLE].handle,
				&priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr,
				&priv->smu_tables.entry[SMU10_CLOCKTABLE].table);
		kfree(hwmgr->smu_backend);
		hwmgr->smu_backend = NULL;
	}

	return 0;
}

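/*
 * SMU start-up and backend init below: query the firmware version,
 * disable GFXOFF on older Raven firmware, verify the driver interface
 * version, and allocate the watermark and DPM clock tables that are
 * shared with the SMU.
 */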
static int smu10_start_smu(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
	adev->pm.fw_version = hwmgr->smu_version >> 8;

	if (!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
	    (adev->apu_flags & AMD_APU_IS_RAVEN) &&
	    adev->pm.fw_version < 0x1e45)
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;

	if (smu10_verify_smc_interface(hwmgr))
		return -EINVAL;

	return 0;
}

static int smu10_smu_init(struct pp_hwmgr *hwmgr)
{
	struct smu10_smumgr *priv;
	int r;

	priv = kzalloc(sizeof(struct smu10_smumgr), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;

	hwmgr->smu_backend = priv;

	/* allocate space for watermarks table */
	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(Watermarks_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[SMU10_WMTABLE].handle,
			&priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
			&priv->smu_tables.entry[SMU10_WMTABLE].table);

	if (r)
		goto err0;

	priv->smu_tables.entry[SMU10_WMTABLE].version = 0x01;
	priv->smu_tables.entry[SMU10_WMTABLE].size = sizeof(Watermarks_t);
	priv->smu_tables.entry[SMU10_WMTABLE].table_id = TABLE_WATERMARKS;

	/* allocate space for DPM clocks table */
	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(DpmClocks_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[SMU10_CLOCKTABLE].handle,
			&priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr,
			&priv->smu_tables.entry[SMU10_CLOCKTABLE].table);

	if (r)
		goto err1;

	priv->smu_tables.entry[SMU10_CLOCKTABLE].version = 0x01;
	priv->smu_tables.entry[SMU10_CLOCKTABLE].size = sizeof(DpmClocks_t);
	priv->smu_tables.entry[SMU10_CLOCKTABLE].table_id = TABLE_DPMCLOCKS;

	return 0;

err1:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle,
			&priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
			&priv->smu_tables.entry[SMU10_WMTABLE].table);
err0:
	kfree(priv);
	return -EINVAL;
}

static int smu10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
{
	int ret;

	if (rw)
		ret = smu10_copy_table_from_smc(hwmgr, table, table_id);
	else
		ret = smu10_copy_table_to_smc(hwmgr, table, table_id);

	return ret;
}


const struct pp_smumgr_func smu10_smu_funcs = {
	.name = "smu10_smu",
	.smu_init = &smu10_smu_init,
	.smu_fini = &smu10_smu_fini,
	.start_smu = &smu10_start_smu,
	.request_smu_load_specific_fw = NULL,
	.send_msg_to_smc = &smu10_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = &smu10_send_msg_to_smc_with_parameter,
	.download_pptable_settings = NULL,
	.upload_pptable_settings = NULL,
	.get_argument = smu10_read_arg_from_smc,
	.smc_table_manager = smu10_smc_table_manager,
};
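
/*
 * Note: this function table is not called directly from this file; the
 * powerplay hwmgr layer is expected to point hwmgr->smumgr_funcs at
 * smu10_smu_funcs for SMU10 (Raven-class) parts, and the smum_* wrappers
 * then dispatch message, argument and table-manager requests through it.
 */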